In [1]:
import glob
import math
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import random
import sklearn.metrics as metrics

from tensorflow.keras import optimizers
from tensorflow.keras import backend
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import add, concatenate, Conv2D, Dense, Dropout, Flatten, Input, Lambda, Reshape
from tensorflow.keras.layers import Activation, AveragePooling2D, BatchNormalization, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import to_categorical


%matplotlib inline
In [2]:
                            # Set up 'ggplot' style
plt.style.use('ggplot')     # if want to use the default style, set 'classic'
plt.rcParams['ytick.right']     = True
plt.rcParams['ytick.labelright']= True
plt.rcParams['ytick.left']      = False
plt.rcParams['ytick.labelleft'] = False
plt.rcParams['font.family']     = 'Arial'
In [3]:
# where am i?
%pwd
Out[3]:
'C:\\Users\\david\\Documents\\ImageNet'
In [4]:
%ls
 Volume in drive C is Acer
 Volume Serial Number is F2E5-64E8

 Directory of C:\Users\david\Documents\ImageNet

09/29/2019  08:30 AM    <DIR>          .
09/29/2019  08:30 AM    <DIR>          ..
09/09/2019  01:02 AM                43 .gitattributes
08/22/2019  11:06 PM                26 .gitignore
09/28/2019  10:39 AM    <DIR>          .ipynb_checkpoints
09/20/2019  11:24 PM         2,012,453 AlexNet_Model_Train_Test.ipynb
09/28/2019  03:56 PM         2,195,565 Create_Train_Test_Set.ipynb
09/14/2019  03:53 PM    <DIR>          data
08/22/2019  11:09 PM           455,126 Download-ImageNet.html
09/09/2019  12:35 AM           288,923 Download-ImageNet.ipynb
09/03/2019  09:40 PM           367,769 Download-Pexels.html
09/09/2019  12:35 AM            94,549 Download-Pexels.ipynb
09/29/2019  08:16 AM             4,935 FlowerPower.csv
09/29/2019  08:16 AM        74,469,728 FlowerPower.hdf5
09/21/2019  06:45 AM            24,730 FlowerPower_best9307.csv
09/22/2019  03:39 AM        62,618,096 FlowerPower_InceptionResNetV2best9337.hdf5
09/22/2019  10:18 AM        62,618,096 FlowerPower_InceptionResNetV2best9467.hdf5
09/27/2019  05:46 PM        62,618,096 FlowerPower_InceptionResNetV2best9521.hdf5
09/21/2019  12:22 PM        98,136,496 FlowerPower_InceptionV4best9305.hdf5
09/21/2019  02:47 AM        98,136,496 FlowerPower_InceptionV4best9307.hdf5
09/21/2019  06:03 PM        98,136,496 FlowerPower_InceptionV4best9418.hdf5
09/28/2019  03:26 AM        98,136,496 FlowerPower_InceptionV4best9470.hdf5
09/22/2019  07:35 PM        74,469,728 FlowerPower_SEInceptionResNetV2best9107.hdf5
09/23/2019  02:54 AM        74,469,728 FlowerPower_SEInceptionResNetV2best9321.hdf5
09/29/2019  06:00 AM        74,469,728 FlowerPower_SEInceptionResNetV2best9429.hdf5
08/17/2019  11:53 AM           124,162 ImageNet-Flowers.txt
08/17/2019  03:54 PM            75,692 ImageNet-Fungus.txt
08/17/2019  03:57 PM            81,424 ImageNet-Rocks.txt
09/21/2019  01:06 PM            66,035 Inception-ResNet-v1 & v2.ipynb
09/15/2019  03:16 PM            58,343 Inception-v4.ipynb
09/14/2019  11:39 PM            26,103 model.pdf
09/21/2019  09:58 AM    <DIR>          npz
09/03/2019  09:40 PM           128,688 Pexels-Flowers.txt
09/03/2019  09:40 PM            28,575 Pexels-Umbrellas.txt
09/14/2019  04:01 PM    <DIR>          readings
08/22/2019  11:02 PM                44 README.md
09/20/2019  11:19 PM           303,323 Reshape_Resize_Images.ipynb
09/09/2019  12:48 AM         8,546,104 train_Neural_Network (Conv2D, 96-0.8).html
09/22/2019  06:50 AM         2,346,911 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data + Rediced Layers, try17 = 93.37 p1).html
09/22/2019  02:01 PM         2,354,430 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data + Rediced Layers, try18 = 94.67 p2).html
09/27/2019  08:20 PM        10,632,612 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data + Rediced Layers, try19 = 95.21 p3).html
09/15/2019  10:09 PM         2,427,075 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data, try13).html
09/15/2019  02:35 AM        12,032,935 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try10).html
09/15/2019  11:36 AM         2,387,331 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try11).html
09/15/2019  05:42 PM         2,291,568 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try12).html
09/16/2019  06:31 AM         5,790,782 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try14).html
09/21/2019  09:52 AM        14,198,219 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try15 = 93.07 p1).html
09/21/2019  06:41 PM         5,458,724 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try16 = 94.18 p2).html
09/28/2019  07:26 AM         2,822,769 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try20 = 94.70 p3).html
09/14/2019  08:36 PM         7,071,416 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp + Added data, try9).html
09/11/2019  01:01 AM         4,494,650 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try6).html
09/11/2019  10:59 PM         6,116,768 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try7).html
09/12/2019  02:35 AM         5,851,809 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try8).html
09/09/2019  03:08 AM         3,900,219 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try3).html
09/09/2019  11:09 PM         6,528,529 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try4).html
09/10/2019  08:44 PM         6,636,754 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try5).html
09/09/2019  01:32 AM         6,583,279 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try1).html
09/09/2019  02:40 AM         6,300,696 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try2).html
09/09/2019  01:23 AM         6,446,135 train_Neural_Network (ResNetV1, 96-0.8, no Dropout, try1).html
09/22/2019  10:46 PM        17,843,876 train_Neural_Network (SE InceptionResNetV2, 96-0.8, Added data + Rediced Layers, try18 = 91.07 p1).html
09/23/2019  06:34 AM        17,845,152 train_Neural_Network (SE InceptionResNetV2, 96-0.8, Added data + Rediced Layers, try18 = 93.21 p2).html
09/29/2019  07:01 AM         2,339,243 train_Neural_Network (SE InceptionResNetV2, 96-0.8, Added data + Rediced Layers, try21 = 94.29 p3).html
09/29/2019  08:30 AM            80,002 train_Neural_Network.ipynb
09/20/2019  11:24 PM         1,838,211 VGG_Model_Setup.ipynb
09/20/2019  11:24 PM            17,772 VGG_Model_Train_Test.ipynb
09/20/2019  11:24 PM         4,184,405 VGG_Model_Train_Test_baseline.ipynb
09/20/2019  11:24 PM         2,184,240 VGG_Model_Train_Test_Mod1.ipynb
09/20/2019  11:24 PM         2,112,869 VGG_Model_Train_Test_Mod2.ipynb
09/20/2019  11:24 PM         1,952,795 VGG_Model_Train_Test_Mod3.ipynb
              63 File(s)  1,066,233,972 bytes
               6 Dir(s)  55,880,114,176 bytes free
In [5]:
# Collect image paths per class; ImageNet downloads are .jpg,
# Pexels downloads are .jpeg.
flowers        = glob.glob('./data/flr_*.jpg')
fungus         = glob.glob('./data/fgs_*.jpg')
rocks          = glob.glob('./data/rck_*.jpg')

pixel_flowers  = glob.glob('./data/pxl_flower_*.jpeg')
pixel_umbrella = glob.glob('./data/pxl_umbrella_*.jpeg')

counts = (len(flowers), len(pixel_flowers), len(fungus), len(rocks), len(pixel_umbrella))
print("There are %s, %s flower, %s fungus, %s rock and %s umbrella pictures" % counts)
There are 1269, 1792 flower, 856 fungus, 1007 rock and 420 umbrella pictures
In [6]:
# Show 5 randomly chosen images from one of the path lists.
from IPython.display import Image

dataset = flowers   # switch to fungus / rocks to preview those sets

for _ in range(5):
    pick = random.randrange(len(dataset))
    print("Showing:", dataset[pick])

    plt.imshow(mpimg.imread(dataset[pick]))
    plt.show()

#Image(dataset[pick])
Showing: ./data\flr_01449.jpg
Showing: ./data\flr_00089.jpg
Showing: ./data\flr_01329.jpg
Showing: ./data\flr_01574.jpg
Showing: ./data\flr_01821.jpg

Extract the training and testing datasets

In [7]:
# Load the pre-built train/test arrays (produced by Create_Train_Test_Set.ipynb).
def _load_npz(name):
    """Return array 'arr_0' from npz/<name>96-0.8+.npz."""
    return np.load('npz/' + name + '96-0.8+.npz')['arr_0']

trDatOrg = _load_npz('flrnonflr-train-imgs')
trLblOrg = _load_npz('flrnonflr-train-labels')
tsDatOrg = _load_npz('flrnonflr-test-imgs')
tsLblOrg = _load_npz('flrnonflr-test-labels')
In [8]:
# Sanity-check the loaded array shapes.
shapes = (trDatOrg.shape, trLblOrg.shape, tsDatOrg.shape, tsLblOrg.shape)
print("For the training and test datasets:")
print("The shapes are %s, %s, %s, %s" % shapes)
For the training and test datasets:
The shapes are (14784, 96, 96, 3), (14784,), (3696, 96, 96, 3), (3696,)
In [9]:
# Spot-check 20 random test images together with their labels.
data = tsDatOrg
label = tsLblOrg

for _ in range(20):
    pick = random.randrange(len(data))
    print("Showing %s index image, It is %s" % (pick, label[pick]))
    plt.imshow(data[pick])
    plt.show()
Showing 3592 index image, It is 0.0
Showing 2085 index image, It is 1.0
Showing 680 index image, It is 1.0
Showing 1821 index image, It is 1.0
Showing 2238 index image, It is 1.0
Showing 1100 index image, It is 1.0
Showing 1284 index image, It is 1.0
Showing 2756 index image, It is 0.0
Showing 68 index image, It is 1.0
Showing 2090 index image, It is 1.0
Showing 3606 index image, It is 0.0
Showing 434 index image, It is 1.0
Showing 3275 index image, It is 0.0
Showing 1308 index image, It is 1.0
Showing 252 index image, It is 1.0
Showing 1686 index image, It is 1.0
Showing 373 index image, It is 1.0
Showing 872 index image, It is 1.0
Showing 433 index image, It is 1.0
Showing 2452 index image, It is 0.0
In [10]:
# Scale the uint8 pixel values into [0, 1] as float32.
trDat = trDatOrg.astype('float32') / 255
tsDat = tsDatOrg.astype('float32') / 255

# Image geometry, read off the training data itself
# (data is already in [samples][rows][cols][channel] layout).
imgrows = trDat.shape[1]
imgclms = trDat.shape[2]
channel = 3

# One-hot encode the labels; the class count falls out of the encoding.
trLbl = to_categorical(trLblOrg)
tsLbl = to_categorical(tsLblOrg)
num_classes = tsLbl.shape[1]
In [11]:
# Fix numpy's global RNG seed for reproducibility.
seed = 29
np.random.seed(seed)

# Base name used for the checkpoint (.hdf5) and log (.csv) files.
modelname = 'FlowerPower'

# RMSprop is the optimizer in use; the Adam line is the alternative tried.
#optmz = optimizers.Adam(lr=0.001)
optmz = optimizers.RMSprop(lr=1e-3)
In [12]:
# Baseline Model -> func: createBaselineModel()

def createBaselineModel():
    """Build and compile a small baseline CNN.

    Two conv/max-pool stages, dropout, then a dense softmax head.
    Input size comes from the module-level imgrows/imgclms/channel and
    the output width from num_classes. Compiled with Adam and
    categorical cross-entropy.
    """
    inputs = Input(shape=(imgrows, imgclms, channel))

    net = Conv2D(30, (4, 4), activation='relu')(inputs)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Conv2D(50, (4, 4), activation='relu')(net)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Dropout(0.3)(net)
    net = Flatten()(net)
    net = Dense(32, activation='relu')(net)
    net = Dense(num_classes, activation='softmax')(net)

    model = Model(inputs=[inputs], outputs=net)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
In [13]:
# ResNetV1 -> func: createResNetV1()
def resLyr(inputs,
           numFilters=16,
           kernelSz=3,
           strides=1,
           activation='relu',
           batchNorm=True,
           convFirst=True,
           lyrName=None):
    """Single ResNet layer: Conv2D with optional BatchNorm and activation.

    convFirst=True gives conv -> BN -> activation (ResNet v1 ordering);
    convFirst=False gives BN -> activation -> conv (pre-activation
    ordering). When lyrName is given it prefixes each sub-layer's name
    ('<lyrName>_conv', '_bn', '_<activation>'); otherwise Keras
    auto-names them.
    """
    # L2-regularized conv with He init; 'same' padding so only `strides`
    # changes the spatial size.
    convLyr = Conv2D(numFilters, kernel_size=kernelSz, strides=strides, 
                     padding='same', kernel_initializer='he_normal', 
                     kernel_regularizer=l2(1e-4), 
                     name=lyrName+'_conv' if lyrName else None)
    x = inputs
    if convFirst:
        x = convLyr(x)
        if batchNorm:
            x = BatchNormalization(name=lyrName+'_bn' if lyrName else None)(x)
        if activation is not None:
            x = Activation(activation,name=lyrName+'_'+activation if lyrName else None)(x)
    else:
        if batchNorm:
            x = BatchNormalization(name=lyrName+'_bn' if lyrName else None)(x)
        if activation is not None:
            x = Activation(activation, name=lyrName+'_'+activation if lyrName else None)(x)
        x = convLyr(x)
    return x


def resBlkV1(inputs,
             numFilters=16,
             numBlocks=3,
             downsampleOnFirst=True,
             names=None):
    """Stack of `numBlocks` ResNet-v1 residual blocks.

    Each block is two resLyr convs plus an identity shortcut. When
    downsampleOnFirst is True, the first block strides by 2 and the
    shortcut goes through a 1x1 linear conv so the add() shapes match.
    `names`, when given, prefixes layer names per block.
    """
    x = inputs
    for run in range(0,numBlocks):
        strides = 1
        blkStr = str(run+1)
        if downsampleOnFirst and run == 0:
            strides = 2
        # Residual branch: conv+BN+relu, then conv+BN (no activation
        # before the add).
        y = resLyr(inputs=x, numFilters=numFilters, strides=strides,
                   lyrName=names+'_Blk'+blkStr+'_Res1' if names else None)
        y = resLyr(inputs=y, numFilters=numFilters, activation=None,
                   lyrName=names+'_Blk'+blkStr+'_Res2' if names else None)
        if downsampleOnFirst and run == 0:
            # Projection shortcut: 1x1 conv, no BN/activation, to match
            # the downsampled/widened residual branch.
            x = resLyr(inputs=x, numFilters=numFilters, kernelSz=1,
                       strides=strides, activation=None, batchNorm=False,
                       lyrName=names+'_Blk'+blkStr+'_lin' if names else None)
        x = add([x,y], name=names+'_Blk'+blkStr+'_add' if names else None)
        x = Activation('relu', name=names+'_Blk'+blkStr+'_relu' if names else None)(x)
    return x

def createResNetV1(inputShape=(imgrows, imgclms, channel),
                   numClasses=2):
    """Build and compile a 6-stage ResNet-v1 classifier.

    Stages double the filter count (16 -> 256) and downsample on stages
    2, 3, 4 and 6, with increasing Dropout between stages. Compiled with
    the module-level `optmz` optimizer and categorical cross-entropy.
    """
    inputs = Input(shape=inputShape)
    v = resLyr(inputs, lyrName='Inpt')
    v = resBlkV1(inputs=v, numFilters=16, numBlocks=3,
                 downsampleOnFirst=False, names='Stg1')
    v = Dropout(0.30)(v)
    v = resBlkV1(inputs=v, numFilters=32, numBlocks=3,
                 downsampleOnFirst=True, names='Stg2')
    v = Dropout(0.40)(v)
    v = resBlkV1(inputs=v, numFilters=64, numBlocks=3,
                 downsampleOnFirst=True, names='Stg3')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=128, numBlocks=3,
                 downsampleOnFirst=True, names='Stg4')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=128, numBlocks=3,
                 downsampleOnFirst=False, names='Stg5')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=256, numBlocks=3,
                 downsampleOnFirst=True, names='Stg6')
    v = Dropout(0.50)(v)
    # pool_size=6 collapses the remaining feature map before the head.
    v = AveragePooling2D(pool_size=6, name='AvgPool')(v)
    v = Flatten()(v) 
    outputs = Dense(numClasses, activation='softmax', 
                    kernel_initializer='he_normal')(v)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer=optmz, 
                  metrics=['accuracy'])
    return model
In [14]:
# Mostly Original # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    """Inception-v4 stem: initial conv/pool stack that downsamples the
    input before the Inception blocks proper.

    `names` is accepted for API symmetry with the other blocks but is
    unused here.
    """
    x = inputs
    
    # Initial 3x3 convs; first one strides by 2.
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    # Parallel downsample: max-pool branch + strided-conv branch, concatenated.
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    # Two conv branches: 1x1->3x3 vs 1x1->7x1->1x7->3x3 (factorized 7x7).
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    # Final downsample; the strided 'valid' conv comes out one pixel
    # smaller than the pool branch for this input size, hence the padding.
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    """Inception-A block: four parallel branches concatenated along
    channels; spatial size is preserved (stride 1, 'same' padding).

    Branches: avg-pool+1x1, plain 1x1, 1x1->3x3, and 1x1->3x3->3x3.
    `names` is unused.
    """
    x = inputs
    
    # NOTE(review): pool_size=(1, 1) makes this avg-pool an identity op;
    # the paper uses a 3x3 pool here — confirm this is intentional.
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    """Inception-B block: four parallel branches with factorized 7x7
    convs, concatenated along channels; spatial size preserved.

    `names` is unused.
    """
    x = inputs
    
    # NOTE(review): pool_size=(1, 1) makes this avg-pool an identity op —
    # confirm intentional (paper uses a larger pool).
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # NOTE(review): both convs below use kernel (1, 7); the Inception-v4
    # paper's middle branch ends 1x7 -> 7x1 — confirm the asymmetry here
    # is intentional.
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    """Inception-C block: four branches, the last two of which split
    into parallel 1x3 / 3x1 ends, all concatenated along channels.

    `names` is unused.
    """
    x = inputs
    
    # NOTE(review): pool_size=(1, 1) makes this avg-pool an identity op —
    # confirm intentional.
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    # 1x1 then split into 1x3 and 3x1 halves.
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    # 1x1 -> 1x3 -> 3x1, then split into 3x1 and 1x3 halves.
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block: halves the spatial size via three stride-2
    'valid' branches (max-pool, 3x3 conv, 1x1->3x3->3x3 conv stack)
    concatenated along channels.

    Filter counts (k, l, m, n) depend on which network variant is being
    built, per the Inception-v4 paper's table. `names` is unused.

    Raises:
        ValueError: if network_selected is not one of the three
            recognized variant names.
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
    else:
        # Previously an unrecognized name fell through and raised a
        # confusing NameError on `k`; fail fast with a clear message.
        raise ValueError("Unknown network_selected: %r" % (network_selected,))

    x = inputs

    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):
    """Reduction-B block: halves the spatial size via three stride-2
    'valid' branches (max-pool, 1x1->3x3 convs, 1x1->1x7->7x1->3x3
    convs) concatenated along channels. `names` is unused."""

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile the 'mostly original' Inception-v4 classifier:
    stem, 4x Inception-A, Reduction-A, 7x Inception-B, Reduction-B,
    3x Inception-C, then pooling and a softmax head. Compiled with Adam
    and categorical cross-entropy.
    """
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = reduction_b_block(x)
    
    x = inception_c_block(x)
    x = inception_c_block(x)
    x = inception_c_block(x)
    
    # NOTE(review): pool_size=(1,1) makes this pooling an identity op;
    # it was reduced from the paper's global pool to avoid a negative
    # dimension at this input size.
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    # NOTE(review): Dense(1536) has no activation (linear) — confirm
    # that is intentional.
    x = Dense(1536)(x) # Changed
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [15]:
# Modified2 # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    """Inception-v4 stem (Modified2 cell).

    NOTE(review): this cell redefines (shadows) the identically named
    function from the previous cell; whichever cell ran last wins.
    Body is identical to the earlier definition. `names` is unused.
    """
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    """Inception-A block (Modified2 cell).

    NOTE(review): redefines (shadows) the same-named function from the
    previous cell; body is identical. `names` is unused.
    """
    x = inputs
    
    # NOTE(review): pool_size=(1, 1) makes this avg-pool an identity op.
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    """Inception-B block (Modified2 cell).

    NOTE(review): redefines (shadows) the same-named function from the
    previous cell; body is identical. Also note both middle-branch
    convs use kernel (1, 7) — the paper ends 1x7 -> 7x1; confirm
    intentional. `names` is unused.
    """
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    """Inception-C block (Modified2 cell).

    NOTE(review): redefines (shadows) the same-named function from the
    previous cell; body is identical. `names` is unused.
    """
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A block (Modified2 cell): halves the spatial size via
    three stride-2 'valid' branches concatenated along channels.

    NOTE(review): redefines (shadows) the same-named function from the
    previous cell. `names` is unused.

    Raises:
        ValueError: if network_selected is not one of the three
            recognized variant names.
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
    else:
        # Previously an unrecognized name fell through and raised a
        # confusing NameError on `k`; fail fast with a clear message.
        raise ValueError("Unknown network_selected: %r" % (network_selected,))

    x = inputs

    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):
    """Reduction-B block (Modified2 cell).

    NOTE(review): redefines (shadows) the same-named function from the
    previous cell; body is identical. `names` is unused.
    """

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile the Modified2 Inception-v4 classifier.

    Same layout as the 'mostly original' version, but with a
    BatchNormalization layer inserted before every Inception/Reduction
    block. NOTE(review): redefines (shadows) the same-named function
    from the previous cell. Compiled with Adam and categorical
    cross-entropy.
    """
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)
    
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    
    # NOTE(review): pool_size=(1,1) makes this pooling an identity op.
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    # NOTE(review): Dense(1536) has no activation (linear) — confirm.
    x = Dense(1536)(x) # Changed
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [16]:
# Modified #(halfed) # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    """Inception-v4 stem, halved variant: same structure as the earlier
    stem but with every filter count halved (32->16, 64->32, 96->48,
    192->96).

    NOTE(review): redefines (shadows) the same-named function from the
    previous cells. `names` is unused.
    """
    x = inputs
    
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=48, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=32, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=32, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    """Inception-A block (halved widths): four parallel 'same'-padding
    branches concatenated on the channel axis; spatial size is unchanged.

    `names` is unused; kept for interface compatibility.
    """
    # Branch 1: average pool followed by a 1x1 projection.
    pool_branch = AveragePooling2D(pool_size=(1, 1), padding='same')(inputs)
    pool_branch = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(pool_branch)

    # Branch 2: plain 1x1 conv.
    one_by_one = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(inputs)

    # Branch 3: 1x1 bottleneck then a single 3x3 conv.
    single_3x3 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    single_3x3 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(single_3x3)

    # Branch 4: 1x1 bottleneck then two stacked 3x3 convs (5x5 receptive field).
    double_3x3 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    double_3x3 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(double_3x3)
    double_3x3 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(double_3x3)

    return concatenate([pool_branch, one_by_one, single_3x3, double_3x3])

def inception_b_block(inputs,
         names=None):
    """Inception-B block (halved widths): four parallel 'same'-padding
    branches concatenated on channels; spatial size is unchanged.

    `names` is unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: average pool + 1x1 projection.
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    # Branch 2: plain 1x1 conv.
    x_ML1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 3: factorized 7x7.
    # NOTE(review): both convs here are (1, 7); the Inception-v4 paper
    # alternates 1x7 then 7x1 — confirm whether this is intentional.
    x_MR1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)

    # Branch 4: deeper alternating 1x7 / 7x1 stack.
    x_ER1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=112, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=128, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    """Inception-C block (halved widths): branches with split 1x3 / 3x1
    heads; six tensors are concatenated on channels. Spatial size is
    unchanged ('same' padding throughout).

    `names` is unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: average pool + 1x1 projection.
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)

    # Branch 2: plain 1x1 conv.
    x_ML1_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 3: 1x1 bottleneck that fans out into parallel 1x3 and 3x1 heads.
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)

    # Branch 4: deeper 1x3/3x1 stack that also fans out into two heads.
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A: halves spatial size via three parallel stride-2 paths.

    Args:
        inputs: 4-D feature tensor.
        network_selected: one of "Inception-v4", "Inception-ResNet-v1",
            "Inception-ResNet-v2"; selects the (k, l, m, n) filter counts
            (values here are scaled down for this notebook).
        names: unused; kept for interface compatibility.

    Raises:
        ValueError: if `network_selected` is not recognized (previously the
        if/elif chain fell through and crashed later with NameError).
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 96, 96, 128, 192
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 128, 128, 192, 192
    else:
        raise ValueError("Unknown network_selected: %r" % (network_selected,))

    x = inputs

    # Branch 1: stride-2 max-pool.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 conv.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 -> 3x3 -> stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):
    """Reduction-B (Inception-v4 variant, halved widths): three parallel
    stride-2 paths concatenated on channels, halving spatial size.

    `names` is unused; kept for interface compatibility.
    """
    # Path 1: stride-2 max-pool.
    pooled = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(inputs)

    # Path 2: 1x1 bottleneck then stride-2 3x3 conv.
    short_path = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    short_path = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(short_path)

    # Path 3: 1x1 -> factorized 1x7 / 7x1 -> stride-2 3x3.
    factored = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    factored = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(factored)
    factored = Conv2D(filters=160, kernel_size=(7, 1), strides=1, padding='same')(factored)
    factored = Conv2D(filters=160, kernel_size=(3, 3), strides=2, padding='valid')(factored)

    return concatenate([pooled, short_path, factored])

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile a (scaled-down) Inception-v4 classifier.

    Uses the stem/inception/reduction block builders defined above
    (stem -> 4xA -> Reduction-A -> 7xB -> Reduction-B -> 3xC -> head),
    with a BatchNormalization layer inserted between blocks.

    NOTE(review): `imgrows`, `imgclms`, `channel` and `optmz` are defined
    in earlier notebook cells; running this cell standalone raises
    NameError. Default args are evaluated at definition time.

    Args:
        inputShape: (rows, cols, channels) of the input images.
        num_classes: number of softmax output classes.

    Returns:
        A compiled tf.keras Model (categorical cross-entropy, optimizer
        `optmz` from an earlier cell).
    """
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)

    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    # pool_size=(1,1) is effectively a no-op; Flatten carries the spatial dims.
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(256)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer=optmz, 
                  metrics=['accuracy'])
    return model 
In [17]:
# Mostly Original # Inception-Res-v2 -> func: create_inception_resnet_v2()
def stem_block(inputs,
         names=None):
    """Inception-ResNet-v2 stem (full paper widths).

    NOTE(review): this redefines `stem_block` from the previous cell —
    later cells silently shadow earlier definitions.

    Args:
        inputs: 4-D image tensor (batch, rows, cols, channels).
        names: unused; kept for interface compatibility.

    Returns:
        Downsampled feature tensor.
    """
    # (Removed a dead `x = inputs` assignment that was immediately overwritten.)
    # Stage 1: two valid 3x3 convs, then max-pool / stride-2 conv in parallel.
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])

    # Stage 2: plain 3x3 path vs. factorized 7x1/1x7 path.
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)

    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])

    # Stage 3: stride-2 conv (zero-padded to match the pool branch) vs. max-pool.
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 A block: residual add of a scaled branch mix.

    output = relu(inputs + scale * conv_mix(inputs)); spatial size and
    channel count (384 expected) are unchanged.

    Args:
        inputs: 4-D feature tensor; assumes 384 channels so the residual
            add shapes match — TODO confirm at call sites.
        scale: residual scaling factor (paper suggests 0.1-0.3).
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> 3x3.
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)

    # Branch 3: 1x1 -> 3x3 -> 3x3.
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count (384).
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 B block: residual add of a scaled branch mix.

    output = relu(inputs + scale * conv_mix(inputs)); assumes 1152 input
    channels so the residual add shapes match — TODO confirm at call sites.

    Args:
        inputs: 4-D feature tensor.
        scale: residual scaling factor.
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> factorized 1x7 / 7x1.
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=160, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=192, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count.
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1152, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 C block: residual add of a scaled branch mix.

    output = relu(inputs + scale * conv_mix(inputs)); assumes 2048 input
    channels so the residual add shapes match — TODO confirm at call sites.

    Args:
        inputs: 4-D feature tensor.
        scale: residual scaling factor.
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> factorized 1x3 / 3x1.
    x_R_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count.
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=2048, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A (full paper widths): three parallel stride-2 paths.

    Args:
        inputs: 4-D feature tensor.
        network_selected: one of "Inception-v4", "Inception-ResNet-v1",
            "Inception-ResNet-v2"; selects the (k, l, m, n) filter counts.
        names: unused; kept for interface compatibility.

    Raises:
        ValueError: if `network_selected` is not recognized (previously the
        if/elif chain fell through and crashed later with NameError).
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
    else:
        raise ValueError("Unknown network_selected: %r" % (network_selected,))

    x = inputs

    # Branch 1: stride-2 max-pool.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 conv.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 -> 3x3 -> stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):
    """Reduction-B for Inception-ResNet-v2: four parallel stride-2 paths
    concatenated on channels, halving the spatial size.

    `names` is unused; kept for interface compatibility.
    """
    # Path 1: stride-2 max-pool.
    pooled = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(inputs)

    # Path 2: 1x1 bottleneck then stride-2 3x3 conv (wider output).
    branch_a = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_a = Conv2D(filters=384, kernel_size=(3, 3), strides=2, padding='valid')(branch_a)

    # Path 3: 1x1 bottleneck then stride-2 3x3 conv.
    branch_b = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_b = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(branch_b)

    # Path 4: 1x1 -> 3x3 -> stride-2 3x3.
    branch_c = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_c = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')(branch_c)
    branch_c = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(branch_c)

    return concatenate([pooled, branch_a, branch_b, branch_c])

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile an Inception-ResNet-v2 classifier
    (stem -> 5xA -> Reduction-A -> 10xB -> Reduction-B -> 5xC -> head).

    NOTE(review): `imgrows`, `imgclms`, `channel` come from earlier cells;
    default args are evaluated at definition time. Relies on the block
    builders most recently defined (notebook cells shadow earlier ones).

    Args:
        inputShape: (rows, cols, channels) of the input images.
        num_classes: number of softmax output classes.

    Returns:
        A compiled tf.keras Model (categorical cross-entropy, Adam).
    """
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_reduction_b_block(x)

    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    # pool_size=(1,1) is effectively a no-op; Flatten carries the spatial dims.
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1792)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [18]:
# Modified # Inception-Res-v2 -> func: create_inception_resnet_v2() 1,3,1 & 0.5
def stem_block(inputs,
         names=None):
    """Inception-ResNet-v2 stem (full paper widths).

    NOTE(review): redefines `stem_block` from earlier cells — later cells
    silently shadow earlier definitions.

    Args:
        inputs: 4-D image tensor (batch, rows, cols, channels).
        names: unused; kept for interface compatibility.

    Returns:
        Downsampled feature tensor.
    """
    # (Removed a dead `x = inputs` assignment that was immediately overwritten.)
    # Stage 1: two valid 3x3 convs, then max-pool / stride-2 conv in parallel.
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])

    # Stage 2: plain 3x3 path vs. factorized 7x1/1x7 path.
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)

    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])

    # Stage 3: stride-2 conv (zero-padded to match the pool branch) vs. max-pool.
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 A block (modified notebook variant).

    output = relu(inputs + scale * conv_mix(inputs)); assumes 384 input
    channels so the residual add shapes match — TODO confirm at call sites.

    Args:
        inputs: 4-D feature tensor.
        scale: residual scaling factor.
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> 3x3.
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)

    # Branch 3: 1x1 -> 3x3 -> 3x3.
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count (384).
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 B block (halved widths in this cell).

    output = relu(inputs + scale * conv_mix(inputs)); assumes 672 input
    channels so the residual add shapes match — TODO confirm at call sites.

    Args:
        inputs: 4-D feature tensor.
        scale: residual scaling factor.
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> factorized 1x7 / 7x1.
    x_R_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=80, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=96, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count.
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=672, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 C block (halved widths in this cell).

    output = relu(inputs + scale * conv_mix(inputs)); assumes 1120 input
    channels so the residual add shapes match — TODO confirm at call sites.

    Args:
        inputs: 4-D feature tensor.
        scale: residual scaling factor.
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> factorized 1x3 / 3x1.
    x_R_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=114, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count.
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1120, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A (halved widths in this cell): three stride-2 paths.

    Args:
        inputs: 4-D feature tensor.
        network_selected: one of "Inception-v4", "Inception-ResNet-v1",
            "Inception-ResNet-v2"; selects the (k, l, m, n) filter counts.
        names: unused; kept for interface compatibility.

    Raises:
        ValueError: if `network_selected` is not recognized (previously the
        if/elif chain fell through and crashed later with NameError).
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 96, 112, 128, 192
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 96, 96, 128, 192
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 128, 128, 96, 192
    else:
        raise ValueError("Unknown network_selected: %r" % (network_selected,))

    x = inputs

    # Branch 1: stride-2 max-pool.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 conv.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 -> 3x3 -> stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):
    """Reduction-B for Inception-ResNet-v2 (halved widths in this cell):
    four parallel stride-2 paths concatenated on channels.

    `names` is unused; kept for interface compatibility.
    """
    # Path 1: stride-2 max-pool.
    pooled = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(inputs)

    # Path 2: 1x1 bottleneck then stride-2 3x3 conv (wider output).
    branch_a = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_a = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(branch_a)

    # Path 3: 1x1 bottleneck then stride-2 3x3 conv.
    branch_b = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_b = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='valid')(branch_b)

    # Path 4: 1x1 -> 3x3 -> stride-2 3x3.
    branch_c = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_c = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same')(branch_c)
    branch_c = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='valid')(branch_c)

    return concatenate([pooled, branch_a, branch_b, branch_c])

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile a trimmed Inception-ResNet-v2 classifier
    (1 A block, 3 B blocks, 1 C block; see no_*_block below).

    NOTE(review): redefines `create_inception_resnet_v2` from the
    previous cell; `imgrows`, `imgclms`, `channel` come from earlier cells.

    Args:
        inputShape: (rows, cols, channels) of the input images.
        num_classes: number of softmax output classes.

    Returns:
        A compiled tf.keras Model (categorical cross-entropy, Adam).
    """
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)

    # Architecture knobs for this trimmed variant.
    batch_norm = True
    no_a_block = 1
    no_b_block = 3
    no_c_block = 1

    x = stem_block(inputs)

    # A blocks, each preceded by BatchNorm when enabled.
    for i in range(no_a_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_a_block(x, scale=0.15)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)

    # B blocks.
    for i in range(no_b_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_b_block(x, scale=0.15)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_reduction_b_block(x)

    # C blocks.
    for i in range(no_c_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_c_block(x, scale=0.15)
    x = BatchNormalization()(x)

    # pool_size=(1,1) is effectively a no-op; Flatten carries the spatial dims.
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(896)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [19]:
# Modified # SE Inception-Res-v2 -> func: create_se_inception_resnet_v2() 1,3,1 & 0.5
def stem_block(inputs,
         names=None):
    """Inception-ResNet-v2 stem (full paper widths) for the SE variant.

    NOTE(review): redefines `stem_block` from earlier cells — later cells
    silently shadow earlier definitions.

    Args:
        inputs: 4-D image tensor (batch, rows, cols, channels).
        names: unused; kept for interface compatibility.

    Returns:
        Downsampled feature tensor.
    """
    # (Removed a dead `x = inputs` assignment that was immediately overwritten.)
    # Stage 1: two valid 3x3 convs, then max-pool / stride-2 conv in parallel.
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])

    # Stage 2: plain 3x3 path vs. factorized 7x1/1x7 path.
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)

    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])

    # Stage 3: stride-2 conv (zero-padded to match the pool branch) vs. max-pool.
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 A block (SE-variant cell; same as previous cell).

    output = relu(inputs + scale * conv_mix(inputs)); assumes 384 input
    channels so the residual add shapes match — TODO confirm at call sites.

    Args:
        inputs: 4-D feature tensor.
        scale: residual scaling factor.
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> 3x3.
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)

    # Branch 3: 1x1 -> 3x3 -> 3x3.
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count (384).
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 B block (halved widths; SE-variant cell).

    output = relu(inputs + scale * conv_mix(inputs)); assumes 672 input
    channels so the residual add shapes match — TODO confirm at call sites.

    Args:
        inputs: 4-D feature tensor.
        scale: residual scaling factor.
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> factorized 1x7 / 7x1.
    x_R_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=80, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=96, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count.
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=672, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    """Inception-ResNet-v2 C block (halved widths; SE-variant cell).

    output = relu(inputs + scale * conv_mix(inputs)); assumes 1120 input
    channels so the residual add shapes match — TODO confirm at call sites.

    Args:
        inputs: 4-D feature tensor.
        scale: residual scaling factor.
        names: unused; kept for interface compatibility.
    """
    x = inputs

    # Branch 1: 1x1 conv.
    x_L_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)

    # Branch 2: 1x1 -> factorized 1x3 / 3x1.
    x_R_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=114, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)

    # Linear 1x1 projection back to the input channel count.
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1120, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    # Residual connection: inputs + scale * branch output.
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    """Reduction-A (SE-variant cell): three parallel stride-2 paths.

    Args:
        inputs: 4-D feature tensor.
        network_selected: one of "Inception-v4", "Inception-ResNet-v1",
            "Inception-ResNet-v2"; selects the (k, l, m, n) filter counts.
        names: unused; kept for interface compatibility.

    Raises:
        ValueError: if `network_selected` is not recognized (previously the
        if/elif chain fell through and crashed later with NameError).
    """
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 96, 96, 128, 192
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 128, 128, 96, 192
    else:
        raise ValueError("Unknown network_selected: %r" % (network_selected,))

    x = inputs

    # Branch 1: stride-2 max-pool.
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 2: single stride-2 3x3 conv.
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)

    # Branch 3: 1x1 -> 3x3 -> stride-2 3x3.
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)

    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):
    """Reduction-B for Inception-ResNet-v2 (halved widths; SE-variant
    cell): four parallel stride-2 paths concatenated on channels.

    `names` is unused; kept for interface compatibility.
    """
    # Path 1: stride-2 max-pool.
    pooled = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(inputs)

    # Path 2: 1x1 bottleneck then stride-2 3x3 conv (wider output).
    branch_a = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_a = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(branch_a)

    # Path 3: 1x1 bottleneck then stride-2 3x3 conv.
    branch_b = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_b = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='valid')(branch_b)

    # Path 4: 1x1 -> 3x3 -> stride-2 3x3.
    branch_c = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(inputs)
    branch_c = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same')(branch_c)
    branch_c = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='valid')(branch_c)

    return concatenate([pooled, branch_a, branch_b, branch_c])

def squeeze_excitation_layer(inputs, out_dim, ratio, layer_name):
    """Squeeze-and-Excitation channel recalibration.

    Globally pools `inputs`, passes the channel vector through a
    bottleneck MLP (reduction factor `ratio`), and rescales every input
    channel by the resulting sigmoid gate.

    Args:
        inputs: 4-D feature tensor with `out_dim` channels.
        out_dim: channel count of `inputs`.
        ratio: bottleneck reduction ratio (e.g. 4).
        layer_name: unused; kept for interface compatibility.

    Returns:
        Tensor of the same shape as `inputs`, channel-rescaled.
    """
    # Squeeze: (batch, H, W, C) -> (batch, C).
    x = GlobalAveragePooling2D()(inputs)
    # Excitation bottleneck.  Fix: use integer division — Dense `units`
    # must be an int; `out_dim / ratio` yielded a float.
    x = Dense(units=out_dim // ratio, use_bias=True)(x)
    x = Activation(activation='relu')(x)
    x = Dense(units=out_dim, use_bias=True)(x)
    x = Activation(activation='sigmoid')(x)

    # Make the per-channel gate broadcastable over the spatial dims.
    x = Reshape([1, 1, out_dim])(x)

    # Scale: element-wise product of the input with its channel gates.
    scale = Lambda(lambda ipt: ipt[0] * ipt[1] ,
                          output_shape=backend.int_shape(x)[1:])([inputs, x])

    return scale
        
def create_se_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    """Build and compile an SE-Inception-ResNet-v2 style classifier.

    Stacks the stem, then A/B/C Inception-ResNet blocks — each optionally
    preceded by BatchNorm, the A and B blocks followed by a
    squeeze-excitation layer — with the two reduction blocks in between,
    finishing with a Dense head and dropout.

    inputShape:  input image shape; defaults are taken from the
                 module-level imgrows/imgclms/channel globals at import time.
    num_classes: number of softmax output classes.
    Returns a compiled Keras Model (Adam optimizer, categorical
    cross-entropy loss, accuracy metric).
    """
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)

    # Architecture hyper-parameters.
    batch_norm = True          # insert BatchNorm before each residual block
    no_a_block = 1             # number of Inception-ResNet-A blocks
    no_b_block = 3             # number of Inception-ResNet-B blocks
    no_c_block = 1             # number of Inception-ResNet-C blocks
    reduction_ratio = 4        # SE bottleneck reduction ratio

    x = stem_block(inputs)

    # Stage A: residual blocks, each followed by an SE layer.
    for i in range(no_a_block):
        if batch_norm:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_a_block(x, scale=0.15)
        chnl = int(np.shape(x)[-1])
        x = squeeze_excitation_layer(x, out_dim=chnl, ratio=reduction_ratio, layer_name='SE_A'+str(i))
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    chnl = int(np.shape(x)[-1])
    x = squeeze_excitation_layer(x, out_dim=chnl, ratio=reduction_ratio, layer_name='SE_A')

    # Stage B: residual blocks, each followed by an SE layer.
    for i in range(no_b_block):
        if batch_norm:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_b_block(x, scale=0.15)
        chnl = int(np.shape(x)[-1])
        x = squeeze_excitation_layer(x, out_dim=chnl, ratio=reduction_ratio, layer_name='SE_B'+str(i))
    x = BatchNormalization()(x)
    x = inception_resnet_v2_reduction_b_block(x)

    # Stage C. NOTE(review): unlike stages A and B, the C blocks get no SE
    # layer and none follows reduction-B — confirm this asymmetry is intended.
    for i in range(no_c_block):
        if batch_norm:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_c_block(x, scale=0.15)
    x = BatchNormalization()(x)

    # (1, 1) pooling is spatially a no-op; kept (per the original comment)
    # to avoid a negative-dimension error on the already-small feature map.
    x = AveragePooling2D(pool_size=(1,1))(x)
    x = Flatten()(x)
    x = Dense(896)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [20]:
# Set up two identical architectures: `model` is trained below, while
# `modelGo` is a clean copy that later receives the best saved weights
# for final evaluation.
model       = create_se_inception_resnet_v2() # This is meant for training
modelGo     = create_se_inception_resnet_v2() # This is used for final testing

model.summary()
WARNING:tensorflow:From D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\initializers.py:104: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with distribution=normal is deprecated and will be removed in a future version.
Instructions for updating:
`normal` is a deprecated alias for `truncated_normal`
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 96, 96, 3)    0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 47, 47, 32)   896         input_1[0][0]                    
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 45, 45, 32)   9248        conv2d[0][0]                     
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 45, 45, 64)   18496       conv2d_1[0][0]                   
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 22, 22, 64)   0           conv2d_2[0][0]                   
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 22, 22, 96)   55392       conv2d_2[0][0]                   
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 22, 22, 160)  0           max_pooling2d[0][0]              
                                                                 conv2d_3[0][0]                   
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_6[0][0]                   
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_4[0][0]                   
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_8[0][0]                   
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 20, 20, 192)  0           conv2d_5[0][0]                   
                                                                 conv2d_9[0][0]                   
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 9, 9, 192)    331968      concatenate_1[0][0]              
__________________________________________________________________________________________________
zero_padding2d (ZeroPadding2D)  (None, 10, 10, 192)  0           conv2d_10[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 10, 10, 192)  0           concatenate_1[0][0]              
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 10, 10, 384)  0           zero_padding2d[0][0]             
                                                                 max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 10, 10, 384)  1536        concatenate_2[0][0]              
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_14[0][0]                  
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 10, 10, 32)   9248        conv2d_12[0][0]                  
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 10, 10, 64)   27712       conv2d_15[0][0]                  
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 10, 10, 128)  0           conv2d_11[0][0]                  
                                                                 conv2d_13[0][0]                  
                                                                 conv2d_16[0][0]                  
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 10, 10, 384)  49536       concatenate_3[0][0]              
__________________________________________________________________________________________________
lambda (Lambda)                 (None, 10, 10, 384)  0           batch_normalization[0][0]        
                                                                 conv2d_17[0][0]                  
__________________________________________________________________________________________________
activation (Activation)         (None, 10, 10, 384)  0           lambda[0][0]                     
__________________________________________________________________________________________________
global_average_pooling2d (Globa (None, 384)          0           activation[0][0]                 
__________________________________________________________________________________________________
dense (Dense)                   (None, 96)           36960       global_average_pooling2d[0][0]   
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 96)           0           dense[0][0]                      
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 384)          37248       activation_1[0][0]               
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 384)          0           dense_1[0][0]                    
__________________________________________________________________________________________________
reshape (Reshape)               (None, 1, 1, 384)    0           activation_2[0][0]               
__________________________________________________________________________________________________
lambda_1 (Lambda)               (None, 10, 10, 384)  0           activation[0][0]                 
                                                                 reshape[0][0]                    
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 10, 10, 384)  1536        lambda_1[0][0]                   
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 10, 10, 128)  49280       batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 10, 10, 128)  147584      conv2d_19[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 4, 4, 384)    0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 4, 4, 192)    663744      batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 4, 4, 96)     110688      conv2d_20[0][0]                  
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 4, 4, 672)    0           max_pooling2d_2[0][0]            
                                                                 conv2d_18[0][0]                  
                                                                 conv2d_21[0][0]                  
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 672)          0           concatenate_4[0][0]              
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 168)          113064      global_average_pooling2d_1[0][0] 
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 168)          0           dense_2[0][0]                    
__________________________________________________________________________________________________
dense_3 (Dense)                 (None, 672)          113568      activation_3[0][0]               
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 672)          0           dense_3[0][0]                    
__________________________________________________________________________________________________
reshape_1 (Reshape)             (None, 1, 1, 672)    0           activation_4[0][0]               
__________________________________________________________________________________________________
lambda_2 (Lambda)               (None, 4, 4, 672)    0           concatenate_4[0][0]              
                                                                 reshape_1[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 4, 4, 672)    2688        lambda_2[0][0]                   
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 4, 4, 64)     43072       batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 4, 4, 80)     35920       conv2d_23[0][0]                  
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 4, 4, 96)     64608       batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 4, 4, 96)     53856       conv2d_24[0][0]                  
__________________________________________________________________________________________________
concatenate_5 (Concatenate)     (None, 4, 4, 192)    0           conv2d_22[0][0]                  
                                                                 conv2d_25[0][0]                  
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 4, 4, 672)    129696      concatenate_5[0][0]              
__________________________________________________________________________________________________
lambda_3 (Lambda)               (None, 4, 4, 672)    0           batch_normalization_2[0][0]      
                                                                 conv2d_26[0][0]                  
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 4, 4, 672)    0           lambda_3[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_2 (Glo (None, 672)          0           activation_5[0][0]               
__________________________________________________________________________________________________
dense_4 (Dense)                 (None, 168)          113064      global_average_pooling2d_2[0][0] 
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 168)          0           dense_4[0][0]                    
__________________________________________________________________________________________________
dense_5 (Dense)                 (None, 672)          113568      activation_6[0][0]               
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 672)          0           dense_5[0][0]                    
__________________________________________________________________________________________________
reshape_2 (Reshape)             (None, 1, 1, 672)    0           activation_7[0][0]               
__________________________________________________________________________________________________
lambda_4 (Lambda)               (None, 4, 4, 672)    0           activation_5[0][0]               
                                                                 reshape_2[0][0]                  
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 4, 4, 672)    2688        lambda_4[0][0]                   
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 4, 4, 64)     43072       batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 4, 4, 80)     35920       conv2d_28[0][0]                  
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 4, 4, 96)     64608       batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 4, 4, 96)     53856       conv2d_29[0][0]                  
__________________________________________________________________________________________________
concatenate_6 (Concatenate)     (None, 4, 4, 192)    0           conv2d_27[0][0]                  
                                                                 conv2d_30[0][0]                  
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 4, 4, 672)    129696      concatenate_6[0][0]              
__________________________________________________________________________________________________
lambda_5 (Lambda)               (None, 4, 4, 672)    0           batch_normalization_3[0][0]      
                                                                 conv2d_31[0][0]                  
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 4, 4, 672)    0           lambda_5[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_3 (Glo (None, 672)          0           activation_8[0][0]               
__________________________________________________________________________________________________
dense_6 (Dense)                 (None, 168)          113064      global_average_pooling2d_3[0][0] 
__________________________________________________________________________________________________
activation_9 (Activation)       (None, 168)          0           dense_6[0][0]                    
__________________________________________________________________________________________________
dense_7 (Dense)                 (None, 672)          113568      activation_9[0][0]               
__________________________________________________________________________________________________
activation_10 (Activation)      (None, 672)          0           dense_7[0][0]                    
__________________________________________________________________________________________________
reshape_3 (Reshape)             (None, 1, 1, 672)    0           activation_10[0][0]              
__________________________________________________________________________________________________
lambda_6 (Lambda)               (None, 4, 4, 672)    0           activation_8[0][0]               
                                                                 reshape_3[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 4, 4, 672)    2688        lambda_6[0][0]                   
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 4, 4, 64)     43072       batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_34 (Conv2D)              (None, 4, 4, 80)     35920       conv2d_33[0][0]                  
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 4, 4, 96)     64608       batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_35 (Conv2D)              (None, 4, 4, 96)     53856       conv2d_34[0][0]                  
__________________________________________________________________________________________________
concatenate_7 (Concatenate)     (None, 4, 4, 192)    0           conv2d_32[0][0]                  
                                                                 conv2d_35[0][0]                  
__________________________________________________________________________________________________
conv2d_36 (Conv2D)              (None, 4, 4, 672)    129696      concatenate_7[0][0]              
__________________________________________________________________________________________________
lambda_7 (Lambda)               (None, 4, 4, 672)    0           batch_normalization_4[0][0]      
                                                                 conv2d_36[0][0]                  
__________________________________________________________________________________________________
activation_11 (Activation)      (None, 4, 4, 672)    0           lambda_7[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_4 (Glo (None, 672)          0           activation_11[0][0]              
__________________________________________________________________________________________________
dense_8 (Dense)                 (None, 168)          113064      global_average_pooling2d_4[0][0] 
__________________________________________________________________________________________________
activation_12 (Activation)      (None, 168)          0           dense_8[0][0]                    
__________________________________________________________________________________________________
dense_9 (Dense)                 (None, 672)          113568      activation_12[0][0]              
__________________________________________________________________________________________________
activation_13 (Activation)      (None, 672)          0           dense_9[0][0]                    
__________________________________________________________________________________________________
reshape_4 (Reshape)             (None, 1, 1, 672)    0           activation_13[0][0]              
__________________________________________________________________________________________________
lambda_8 (Lambda)               (None, 4, 4, 672)    0           activation_11[0][0]              
                                                                 reshape_4[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 4, 4, 672)    2688        lambda_8[0][0]                   
__________________________________________________________________________________________________
conv2d_41 (Conv2D)              (None, 4, 4, 128)    86144       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_37 (Conv2D)              (None, 4, 4, 128)    86144       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_39 (Conv2D)              (None, 4, 4, 128)    86144       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_42 (Conv2D)              (None, 4, 4, 128)    147584      conv2d_41[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 1, 1, 672)    0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_38 (Conv2D)              (None, 1, 1, 192)    221376      conv2d_37[0][0]                  
__________________________________________________________________________________________________
conv2d_40 (Conv2D)              (None, 1, 1, 128)    147584      conv2d_39[0][0]                  
__________________________________________________________________________________________________
conv2d_43 (Conv2D)              (None, 1, 1, 128)    147584      conv2d_42[0][0]                  
__________________________________________________________________________________________________
concatenate_8 (Concatenate)     (None, 1, 1, 1120)   0           max_pooling2d_3[0][0]            
                                                                 conv2d_38[0][0]                  
                                                                 conv2d_40[0][0]                  
                                                                 conv2d_43[0][0]                  
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 1, 1, 1120)   4480        concatenate_8[0][0]              
__________________________________________________________________________________________________
conv2d_45 (Conv2D)              (None, 1, 1, 96)     107616      batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_46 (Conv2D)              (None, 1, 1, 114)    32946       conv2d_45[0][0]                  
__________________________________________________________________________________________________
conv2d_44 (Conv2D)              (None, 1, 1, 96)     107616      batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_47 (Conv2D)              (None, 1, 1, 128)    43904       conv2d_46[0][0]                  
__________________________________________________________________________________________________
concatenate_9 (Concatenate)     (None, 1, 1, 224)    0           conv2d_44[0][0]                  
                                                                 conv2d_47[0][0]                  
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 1, 1, 1120)   252000      concatenate_9[0][0]              
__________________________________________________________________________________________________
lambda_9 (Lambda)               (None, 1, 1, 1120)   0           batch_normalization_6[0][0]      
                                                                 conv2d_48[0][0]                  
__________________________________________________________________________________________________
activation_14 (Activation)      (None, 1, 1, 1120)   0           lambda_9[0][0]                   
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 1, 1, 1120)   4480        activation_14[0][0]              
__________________________________________________________________________________________________
average_pooling2d (AveragePooli (None, 1, 1, 1120)   0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
flatten (Flatten)               (None, 1120)         0           average_pooling2d[0][0]          
__________________________________________________________________________________________________
dense_10 (Dense)                (None, 896)          1004416     flatten[0][0]                    
__________________________________________________________________________________________________
dropout (Dropout)               (None, 896)          0           dense_10[0][0]                   
__________________________________________________________________________________________________
dense_11 (Dense)                (None, 2)            1794        dropout[0][0]                    
==================================================================================================
Total params: 6,171,316
Trainable params: 6,159,924
Non-trainable params: 11,392
__________________________________________________________________________________________________
In [21]:
# Optionally resume training from a previously saved best checkpoint.
continue_training = True

if continue_training:
    # NOTE(review): weights load from this HDF5 file into the freshly built
    # architecture — it must match the model that produced the checkpoint.
    best_model_filepath = "FlowerPower_SEInceptionResNetV2best9429.hdf5"
    model.load_weights(best_model_filepath)
In [22]:
# Create checkpoint for the training
# This checkpoint performs model saving when
# an epoch gives highest testing accuracy
# filepath        = modelname + ".hdf5"
# checkpoint      = ModelCheckpoint(filepath, 
#                                   monitor='val_acc', 
#                                   verbose=0, 
#                                   save_best_only=True, 
#                                   mode='max')

#                             # Log the epoch detail into csv
# csv_logger      = CSVLogger(modelname +'.csv')
# callbacks_list  = [checkpoint, csv_logger]

def lrSchedule(epoch):
    """Piecewise-constant learning-rate schedule for LearningRateScheduler.

    Starts at 1e-3 and decays at epoch boundaries 150/200/240/270,
    reaching 1e-4, 1e-5, 1e-6 and finally 5e-7. Prints the rate each
    epoch so it shows up in the training log.
    """
    base_lr = 1e-3
    # (epoch threshold, multiplier) pairs, checked from latest to earliest;
    # the first matching threshold wins.
    decay_table = [(270, 0.5e-3), (240, 1e-3), (200, 1e-2), (150, 1e-1)]

    factor = 1.0
    for threshold, scale in decay_table:
        if epoch > threshold:
            factor = scale
            break

    lr = base_lr * factor
    print('Learning rate: ', lr)
    return lr

# Assemble the training callbacks: apply the piecewise learning-rate
# schedule, save the weights whenever validation accuracy improves,
# and log per-epoch metrics to a CSV file.
LRScheduler = LearningRateScheduler(lrSchedule)

filepath = modelname + ".hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_acc',
                             verbose=0,
                             save_best_only=True,  # keep only the best epoch's weights
                             mode='max')           # 'val_acc' is better when higher

csv_logger = CSVLogger(modelname + '.csv')

callbacks_list = [checkpoint, csv_logger, LRScheduler]
In [23]:
# Train with on-the-fly data augmentation.
# The generator randomly shifts, rotates, zooms and horizontally flips
# the training images each epoch; vertical flips are disabled.
datagen = ImageDataGenerator(width_shift_range=0.25,
                             height_shift_range=0.25,
                             rotation_range=45,
                             zoom_range=0.8,
                             fill_mode='nearest',
                             horizontal_flip=True,
                             vertical_flip=False)

# NOTE(review): fit_generator is deprecated in TF >= 2.1 in favour of
# model.fit(generator, ...); kept for compatibility with the installed version.
model.fit_generator(datagen.flow(trDat, trLbl, batch_size=16),
                    validation_data=(tsDat, tsLbl),
                    epochs=300,
                    verbose=1,
                    # steps_per_epoch must be an integer; the original
                    # len(trDat)/16 passed a float. ceil ensures the
                    # final partial batch is still consumed each epoch.
                    steps_per_epoch=math.ceil(len(trDat) / 16),
                    callbacks=callbacks_list)
Learning rate:  0.001
Epoch 1/300
924/924 [==============================] - 98s 106ms/step - loss: 0.2210 - acc: 0.9166 - val_loss: 0.1885 - val_acc: 0.9267
Learning rate:  0.001
Epoch 2/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2528 - acc: 0.8994 - val_loss: 0.1855 - val_acc: 0.9307
Learning rate:  0.001
Epoch 3/300
924/924 [==============================] - 76s 83ms/step - loss: 0.2298 - acc: 0.9117 - val_loss: 0.1742 - val_acc: 0.9334
Learning rate:  0.001
Epoch 4/300
924/924 [==============================] - 72s 77ms/step - loss: 0.2252 - acc: 0.9141 - val_loss: 0.1667 - val_acc: 0.9391
Learning rate:  0.001
Epoch 5/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2163 - acc: 0.9153 - val_loss: 0.1678 - val_acc: 0.9340
Learning rate:  0.001
Epoch 6/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2162 - acc: 0.9189 - val_loss: 0.1988 - val_acc: 0.9242
Learning rate:  0.001
Epoch 7/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2164 - acc: 0.9177 - val_loss: 0.1822 - val_acc: 0.9267
Learning rate:  0.001
Epoch 8/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2138 - acc: 0.9163 - val_loss: 0.1755 - val_acc: 0.9329
Learning rate:  0.001
Epoch 9/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2171 - acc: 0.9178 - val_loss: 0.1688 - val_acc: 0.9399
Learning rate:  0.001
Epoch 10/300
924/924 [==============================] - 77s 84ms/step - loss: 0.2152 - acc: 0.9191 - val_loss: 0.1734 - val_acc: 0.9399
Learning rate:  0.001
Epoch 11/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2346 - acc: 0.9102 - val_loss: 0.1964 - val_acc: 0.9234
Learning rate:  0.001
Epoch 12/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2157 - acc: 0.9177 - val_loss: 0.1819 - val_acc: 0.9343
Learning rate:  0.001
Epoch 13/300
924/924 [==============================] - 74s 81ms/step - loss: 0.2171 - acc: 0.9161 - val_loss: 0.1724 - val_acc: 0.9345
Learning rate:  0.001
Epoch 14/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2153 - acc: 0.9171 - val_loss: 0.2008 - val_acc: 0.9221
Learning rate:  0.001
Epoch 15/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2178 - acc: 0.9166 - val_loss: 0.1743 - val_acc: 0.9375
Learning rate:  0.001
Epoch 16/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2174 - acc: 0.9176 - val_loss: 0.1936 - val_acc: 0.9278
Learning rate:  0.001
Epoch 17/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2205 - acc: 0.9142 - val_loss: 0.1645 - val_acc: 0.9416
Learning rate:  0.001
Epoch 18/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2204 - acc: 0.9162 - val_loss: 0.1746 - val_acc: 0.9321
Learning rate:  0.001
Epoch 19/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2200 - acc: 0.9140 - val_loss: 0.2475 - val_acc: 0.8888
Learning rate:  0.001
Epoch 20/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2192 - acc: 0.9159 - val_loss: 0.2199 - val_acc: 0.9175
Learning rate:  0.001
Epoch 21/300
924/924 [==============================] - 70s 76ms/step - loss: 0.2196 - acc: 0.9187 - val_loss: 0.1728 - val_acc: 0.9405
Learning rate:  0.001
Epoch 22/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2135 - acc: 0.9177 - val_loss: 0.2085 - val_acc: 0.9242
Learning rate:  0.001
Epoch 23/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2213 - acc: 0.9171 - val_loss: 0.1716 - val_acc: 0.9356
Learning rate:  0.001
Epoch 24/300
924/924 [==============================] - 70s 76ms/step - loss: 0.2196 - acc: 0.9154 - val_loss: 0.1784 - val_acc: 0.9372
Learning rate:  0.001
Epoch 25/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2182 - acc: 0.9175 - val_loss: 0.1729 - val_acc: 0.9353
Learning rate:  0.001
Epoch 26/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2187 - acc: 0.9160 - val_loss: 0.1858 - val_acc: 0.9288
Learning rate:  0.001
Epoch 27/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2212 - acc: 0.9167 - val_loss: 0.1804 - val_acc: 0.9343
Learning rate:  0.001
Epoch 28/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2245 - acc: 0.9144 - val_loss: 0.2185 - val_acc: 0.9153
Learning rate:  0.001
Epoch 29/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2210 - acc: 0.9158 - val_loss: 0.1802 - val_acc: 0.9291
Learning rate:  0.001
Epoch 30/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2174 - acc: 0.9135 - val_loss: 0.1679 - val_acc: 0.9343
Learning rate:  0.001
Epoch 31/300
924/924 [==============================] - 70s 75ms/step - loss: 0.2157 - acc: 0.9168 - val_loss: 0.1756 - val_acc: 0.9391
Learning rate:  0.001
Epoch 32/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2207 - acc: 0.9169 - val_loss: 0.1676 - val_acc: 0.9372
Learning rate:  0.001
Epoch 33/300
924/924 [==============================] - 70s 75ms/step - loss: 0.2238 - acc: 0.9158 - val_loss: 0.1804 - val_acc: 0.9345
Learning rate:  0.001
Epoch 34/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2122 - acc: 0.9186 - val_loss: 0.1870 - val_acc: 0.9288
Learning rate:  0.001
Epoch 35/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2141 - acc: 0.9157 - val_loss: 0.1646 - val_acc: 0.9378
Learning rate:  0.001
Epoch 36/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2168 - acc: 0.9193 - val_loss: 0.1615 - val_acc: 0.9389
Learning rate:  0.001
Epoch 37/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2154 - acc: 0.9189 - val_loss: 0.1737 - val_acc: 0.9318
Learning rate:  0.001
Epoch 38/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2178 - acc: 0.9186 - val_loss: 0.1621 - val_acc: 0.9399
Learning rate:  0.001
Epoch 39/300
924/924 [==============================] - 76s 83ms/step - loss: 0.2187 - acc: 0.9180 - val_loss: 0.1696 - val_acc: 0.9380
Learning rate:  0.001
Epoch 40/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2114 - acc: 0.9202 - val_loss: 0.2172 - val_acc: 0.9180
Learning rate:  0.001
Epoch 41/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2131 - acc: 0.9169 - val_loss: 0.1784 - val_acc: 0.9334
Learning rate:  0.001
Epoch 42/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2125 - acc: 0.9190 - val_loss: 0.1832 - val_acc: 0.9297
Learning rate:  0.001
Epoch 43/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2108 - acc: 0.9207 - val_loss: 0.1924 - val_acc: 0.9305
Learning rate:  0.001
Epoch 44/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2114 - acc: 0.9176 - val_loss: 0.1682 - val_acc: 0.9367
Learning rate:  0.001
Epoch 45/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2123 - acc: 0.9192 - val_loss: 0.1825 - val_acc: 0.9291
Learning rate:  0.001
Epoch 46/300
924/924 [==============================] - 72s 77ms/step - loss: 0.2121 - acc: 0.9184 - val_loss: 0.1617 - val_acc: 0.9372
Learning rate:  0.001
Epoch 47/300
924/924 [==============================] - 73s 80ms/step - loss: 0.2124 - acc: 0.9222 - val_loss: 0.1752 - val_acc: 0.9348
Learning rate:  0.001
Epoch 48/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2109 - acc: 0.9199 - val_loss: 0.1942 - val_acc: 0.9253
Learning rate:  0.001
Epoch 49/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2162 - acc: 0.9167 - val_loss: 0.1665 - val_acc: 0.9405
Learning rate:  0.001
Epoch 50/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2116 - acc: 0.9197 - val_loss: 0.2286 - val_acc: 0.9077
Learning rate:  0.001
Epoch 51/300
924/924 [==============================] - 70s 75ms/step - loss: 0.2098 - acc: 0.9200 - val_loss: 0.1682 - val_acc: 0.9407
Learning rate:  0.001
Epoch 52/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2139 - acc: 0.9192 - val_loss: 0.2031 - val_acc: 0.9242
Learning rate:  0.001
Epoch 53/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2167 - acc: 0.9174 - val_loss: 0.1659 - val_acc: 0.9397
Learning rate:  0.001
Epoch 54/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2142 - acc: 0.9184 - val_loss: 0.1669 - val_acc: 0.9353
Learning rate:  0.001
Epoch 55/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2134 - acc: 0.9185 - val_loss: 0.1880 - val_acc: 0.9351
Learning rate:  0.001
Epoch 56/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2062 - acc: 0.9198 - val_loss: 0.1901 - val_acc: 0.9337
Learning rate:  0.001
Epoch 57/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2119 - acc: 0.9185 - val_loss: 0.1807 - val_acc: 0.9351
Learning rate:  0.001
Epoch 58/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2135 - acc: 0.9170 - val_loss: 0.1604 - val_acc: 0.9391
Learning rate:  0.001
Epoch 59/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2134 - acc: 0.9192 - val_loss: 0.1703 - val_acc: 0.9378
Learning rate:  0.001
Epoch 60/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2081 - acc: 0.9206 - val_loss: 0.1781 - val_acc: 0.9370
Learning rate:  0.001
Epoch 61/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2072 - acc: 0.9195 - val_loss: 0.1544 - val_acc: 0.9424
Learning rate:  0.001
Epoch 62/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2225 - acc: 0.9151 - val_loss: 0.1639 - val_acc: 0.9416
Learning rate:  0.001
Epoch 63/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2077 - acc: 0.9187 - val_loss: 0.1678 - val_acc: 0.9356
Learning rate:  0.001
Epoch 64/300
924/924 [==============================] - 70s 75ms/step - loss: 0.2133 - acc: 0.9195 - val_loss: 0.1539 - val_acc: 0.9394
Learning rate:  0.001
Epoch 65/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2159 - acc: 0.9182 - val_loss: 0.1655 - val_acc: 0.9356
Learning rate:  0.001
Epoch 66/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2192 - acc: 0.9158 - val_loss: 0.1732 - val_acc: 0.9391
Learning rate:  0.001
Epoch 67/300
924/924 [==============================] - 79s 85ms/step - loss: 0.2115 - acc: 0.9189 - val_loss: 0.1650 - val_acc: 0.9367
Learning rate:  0.001
Epoch 68/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2209 - acc: 0.9174 - val_loss: 0.1691 - val_acc: 0.9340
Learning rate:  0.001
Epoch 69/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2091 - acc: 0.9186 - val_loss: 0.1667 - val_acc: 0.9380
Learning rate:  0.001
Epoch 70/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2118 - acc: 0.9201 - val_loss: 0.1803 - val_acc: 0.9291
Learning rate:  0.001
Epoch 71/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2120 - acc: 0.9181 - val_loss: 0.1650 - val_acc: 0.9361
Learning rate:  0.001
Epoch 72/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2079 - acc: 0.9210 - val_loss: 0.1656 - val_acc: 0.9340
Learning rate:  0.001
Epoch 73/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2099 - acc: 0.9197 - val_loss: 0.1596 - val_acc: 0.9378
Learning rate:  0.001
Epoch 74/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2211 - acc: 0.9151 - val_loss: 0.1608 - val_acc: 0.9378
Learning rate:  0.001
Epoch 75/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2113 - acc: 0.9198 - val_loss: 0.1883 - val_acc: 0.9343
Learning rate:  0.001
Epoch 76/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2130 - acc: 0.9178 - val_loss: 0.1666 - val_acc: 0.9378
Learning rate:  0.001
Epoch 77/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2091 - acc: 0.9200 - val_loss: 0.1648 - val_acc: 0.9372
Learning rate:  0.001
Epoch 78/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2126 - acc: 0.9203 - val_loss: 0.1584 - val_acc: 0.9394
Learning rate:  0.001
Epoch 79/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2130 - acc: 0.9178 - val_loss: 0.1670 - val_acc: 0.9343
Learning rate:  0.001
Epoch 80/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2118 - acc: 0.9179 - val_loss: 0.1703 - val_acc: 0.9375
Learning rate:  0.001
Epoch 81/300
924/924 [==============================] - 73s 78ms/step - loss: 0.2059 - acc: 0.9219 - val_loss: 0.1946 - val_acc: 0.9234
Learning rate:  0.001
Epoch 82/300
924/924 [==============================] - 80s 87ms/step - loss: 0.2118 - acc: 0.9197 - val_loss: 0.1693 - val_acc: 0.9348
Learning rate:  0.001
Epoch 83/300
924/924 [==============================] - 70s 76ms/step - loss: 0.2168 - acc: 0.9164 - val_loss: 0.1664 - val_acc: 0.9361
Learning rate:  0.001
Epoch 84/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2240 - acc: 0.9146 - val_loss: 0.1597 - val_acc: 0.9429
Learning rate:  0.001
Epoch 85/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2101 - acc: 0.9221 - val_loss: 0.1628 - val_acc: 0.9383
Learning rate:  0.001
Epoch 86/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2032 - acc: 0.9217 - val_loss: 0.1656 - val_acc: 0.9394
Learning rate:  0.001
Epoch 87/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2103 - acc: 0.9198 - val_loss: 0.1604 - val_acc: 0.9370
Learning rate:  0.001
Epoch 88/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2146 - acc: 0.9209 - val_loss: 0.1660 - val_acc: 0.9340
Learning rate:  0.001
Epoch 89/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2070 - acc: 0.9211 - val_loss: 0.1937 - val_acc: 0.9226
Learning rate:  0.001
Epoch 90/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2108 - acc: 0.9202 - val_loss: 0.1890 - val_acc: 0.9310
Learning rate:  0.001
Epoch 91/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2031 - acc: 0.9216 - val_loss: 0.1606 - val_acc: 0.9418
Learning rate:  0.001
Epoch 92/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2102 - acc: 0.9203 - val_loss: 0.1721 - val_acc: 0.9383
Learning rate:  0.001
Epoch 93/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2083 - acc: 0.9203 - val_loss: 0.1660 - val_acc: 0.9389
Learning rate:  0.001
Epoch 94/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2140 - acc: 0.9173 - val_loss: 0.1539 - val_acc: 0.9451
Learning rate:  0.001
Epoch 95/300
924/924 [==============================] - 76s 83ms/step - loss: 0.2064 - acc: 0.9215 - val_loss: 0.1631 - val_acc: 0.9351
Learning rate:  0.001
Epoch 96/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2088 - acc: 0.9192 - val_loss: 0.1856 - val_acc: 0.9334
Learning rate:  0.001
Epoch 97/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2088 - acc: 0.9199 - val_loss: 0.1672 - val_acc: 0.9383
Learning rate:  0.001
Epoch 98/300
924/924 [==============================] - 74s 81ms/step - loss: 0.2063 - acc: 0.9214 - val_loss: 0.1683 - val_acc: 0.9359
Learning rate:  0.001
Epoch 99/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2085 - acc: 0.9224 - val_loss: 0.1606 - val_acc: 0.9389
Learning rate:  0.001
Epoch 100/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2095 - acc: 0.9209 - val_loss: 0.1589 - val_acc: 0.9416
Learning rate:  0.001
Epoch 101/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2009 - acc: 0.9212 - val_loss: 0.1872 - val_acc: 0.9223
Learning rate:  0.001
Epoch 102/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2045 - acc: 0.9228 - val_loss: 0.1629 - val_acc: 0.9391
Learning rate:  0.001
Epoch 103/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2082 - acc: 0.9244 - val_loss: 0.1565 - val_acc: 0.9405
Learning rate:  0.001
Epoch 104/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2054 - acc: 0.9204 - val_loss: 0.1727 - val_acc: 0.9361
Learning rate:  0.001
Epoch 105/300
924/924 [==============================] - 70s 76ms/step - loss: 0.2053 - acc: 0.9205 - val_loss: 0.1530 - val_acc: 0.9424
Learning rate:  0.001
Epoch 106/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2048 - acc: 0.9216 - val_loss: 0.1707 - val_acc: 0.9386
Learning rate:  0.001
Epoch 107/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2102 - acc: 0.9202 - val_loss: 0.1661 - val_acc: 0.9391
Learning rate:  0.001
Epoch 108/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2099 - acc: 0.9169 - val_loss: 0.1684 - val_acc: 0.9367
Learning rate:  0.001
Epoch 109/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2083 - acc: 0.9194 - val_loss: 0.1672 - val_acc: 0.9386
Learning rate:  0.001
Epoch 110/300
924/924 [==============================] - 75s 82ms/step - loss: 0.2063 - acc: 0.9209 - val_loss: 0.1622 - val_acc: 0.9345
Learning rate:  0.001
Epoch 111/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2109 - acc: 0.9207 - val_loss: 0.1565 - val_acc: 0.9418
Learning rate:  0.001
Epoch 112/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2016 - acc: 0.9234 - val_loss: 0.1639 - val_acc: 0.9391
Learning rate:  0.001
Epoch 113/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2078 - acc: 0.9208 - val_loss: 0.1649 - val_acc: 0.9340
Learning rate:  0.001
Epoch 114/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2063 - acc: 0.9228 - val_loss: 0.1815 - val_acc: 0.9324
Learning rate:  0.001
Epoch 115/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2057 - acc: 0.9196 - val_loss: 0.1717 - val_acc: 0.9375
Learning rate:  0.001
Epoch 116/300
924/924 [==============================] - 70s 76ms/step - loss: 0.2059 - acc: 0.9215 - val_loss: 0.1874 - val_acc: 0.9310
Learning rate:  0.001
Epoch 117/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2079 - acc: 0.9215 - val_loss: 0.1573 - val_acc: 0.9453
Learning rate:  0.001
Epoch 118/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2106 - acc: 0.9201 - val_loss: 0.1575 - val_acc: 0.9394
Learning rate:  0.001
Epoch 119/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2040 - acc: 0.9229 - val_loss: 0.1653 - val_acc: 0.9383
Learning rate:  0.001
Epoch 120/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2081 - acc: 0.9228 - val_loss: 0.1725 - val_acc: 0.9386
Learning rate:  0.001
Epoch 121/300
924/924 [==============================] - 77s 84ms/step - loss: 0.2051 - acc: 0.9230 - val_loss: 0.1650 - val_acc: 0.9372
Learning rate:  0.001
Epoch 122/300
924/924 [==============================] - 78s 85ms/step - loss: 0.2064 - acc: 0.9224 - val_loss: 0.1601 - val_acc: 0.9421
Learning rate:  0.001
Epoch 123/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2060 - acc: 0.9200 - val_loss: 0.1526 - val_acc: 0.9464
Learning rate:  0.001
Epoch 124/300
924/924 [==============================] - 77s 84ms/step - loss: 0.2057 - acc: 0.9221 - val_loss: 0.2141 - val_acc: 0.9194
Learning rate:  0.001
Epoch 125/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2090 - acc: 0.9223 - val_loss: 0.1555 - val_acc: 0.9410
Learning rate:  0.001
Epoch 126/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2001 - acc: 0.9259 - val_loss: 0.1497 - val_acc: 0.9437
Learning rate:  0.001
Epoch 127/300
924/924 [==============================] - 72s 77ms/step - loss: 0.2044 - acc: 0.9210 - val_loss: 0.1567 - val_acc: 0.9367
Learning rate:  0.001
Epoch 128/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2062 - acc: 0.9209 - val_loss: 0.1681 - val_acc: 0.9378
Learning rate:  0.001
Epoch 129/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2253 - acc: 0.9127 - val_loss: 0.1810 - val_acc: 0.9313
Learning rate:  0.001
Epoch 130/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2216 - acc: 0.9138 - val_loss: 0.2494 - val_acc: 0.8969
Learning rate:  0.001
Epoch 131/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2070 - acc: 0.9216 - val_loss: 0.1592 - val_acc: 0.9372
Learning rate:  0.001
Epoch 132/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2197 - acc: 0.9172 - val_loss: 0.1802 - val_acc: 0.9310
Learning rate:  0.001
Epoch 133/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2235 - acc: 0.9154 - val_loss: 0.2087 - val_acc: 0.9251
Learning rate:  0.001
Epoch 134/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2073 - acc: 0.9215 - val_loss: 0.1582 - val_acc: 0.9426
Learning rate:  0.001
Epoch 135/300
924/924 [==============================] - 78s 84ms/step - loss: 0.2036 - acc: 0.9244 - val_loss: 0.1596 - val_acc: 0.9410
Learning rate:  0.001
Epoch 136/300
924/924 [==============================] - 79s 86ms/step - loss: 0.2063 - acc: 0.9190 - val_loss: 0.1726 - val_acc: 0.9353
Learning rate:  0.001
Epoch 137/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1978 - acc: 0.9240 - val_loss: 0.1733 - val_acc: 0.9424
Learning rate:  0.001
Epoch 138/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2078 - acc: 0.9195 - val_loss: 0.1565 - val_acc: 0.9410
Learning rate:  0.001
Epoch 139/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2118 - acc: 0.9208 - val_loss: 0.1565 - val_acc: 0.9424
Learning rate:  0.001
Epoch 140/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2007 - acc: 0.9245 - val_loss: 0.1608 - val_acc: 0.9383
Learning rate:  0.001
Epoch 141/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2027 - acc: 0.9217 - val_loss: 0.1536 - val_acc: 0.9410
Learning rate:  0.001
Epoch 142/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2016 - acc: 0.9226 - val_loss: 0.1678 - val_acc: 0.9372
Learning rate:  0.001
Epoch 143/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1994 - acc: 0.9227 - val_loss: 0.1569 - val_acc: 0.9397
Learning rate:  0.001
Epoch 144/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2030 - acc: 0.9217 - val_loss: 0.1500 - val_acc: 0.9435
Learning rate:  0.001
Epoch 145/300
924/924 [==============================] - 74s 81ms/step - loss: 0.2211 - acc: 0.9126 - val_loss: 0.1664 - val_acc: 0.9383
Learning rate:  0.001
Epoch 146/300
924/924 [==============================] - 75s 82ms/step - loss: 0.2072 - acc: 0.9196 - val_loss: 0.1576 - val_acc: 0.9375
Learning rate:  0.001
Epoch 147/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2028 - acc: 0.9211 - val_loss: 0.1693 - val_acc: 0.9348
Learning rate:  0.001
Epoch 148/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1980 - acc: 0.9235 - val_loss: 0.1479 - val_acc: 0.9443
Learning rate:  0.001
Epoch 149/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1992 - acc: 0.9252 - val_loss: 0.1652 - val_acc: 0.9361
Learning rate:  0.001
Epoch 150/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1957 - acc: 0.9243 - val_loss: 0.2038 - val_acc: 0.9161
Learning rate:  0.001
Epoch 151/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2035 - acc: 0.9230 - val_loss: 0.1623 - val_acc: 0.9375
Learning rate:  0.0001
Epoch 152/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1896 - acc: 0.9272 - val_loss: 0.1481 - val_acc: 0.9453
Learning rate:  0.0001
Epoch 153/300
924/924 [==============================] - 77s 83ms/step - loss: 0.1974 - acc: 0.9261 - val_loss: 0.1478 - val_acc: 0.9445
Learning rate:  0.0001
Epoch 154/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1903 - acc: 0.9299 - val_loss: 0.1501 - val_acc: 0.9459
Learning rate:  0.0001
Epoch 155/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1813 - acc: 0.9320 - val_loss: 0.1474 - val_acc: 0.9483
Learning rate:  0.0001
Epoch 156/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1889 - acc: 0.9282 - val_loss: 0.1571 - val_acc: 0.9467
Learning rate:  0.0001
Epoch 157/300
924/924 [==============================] - 77s 83ms/step - loss: 0.1819 - acc: 0.9329 - val_loss: 0.1427 - val_acc: 0.9467
Learning rate:  0.0001
Epoch 158/300
924/924 [==============================] - 79s 85ms/step - loss: 0.1864 - acc: 0.9309 - val_loss: 0.1452 - val_acc: 0.9464
Learning rate:  0.0001
Epoch 159/300
924/924 [==============================] - 82s 89ms/step - loss: 0.1770 - acc: 0.9319 - val_loss: 0.1562 - val_acc: 0.9459
Learning rate:  0.0001
Epoch 160/300
924/924 [==============================] - 78s 84ms/step - loss: 0.1871 - acc: 0.9314 - val_loss: 0.1423 - val_acc: 0.9478
Learning rate:  0.0001
Epoch 161/300
924/924 [==============================] - 80s 87ms/step - loss: 0.1846 - acc: 0.9295 - val_loss: 0.1537 - val_acc: 0.9459
Learning rate:  0.0001
Epoch 162/300
924/924 [==============================] - 80s 87ms/step - loss: 0.1836 - acc: 0.9305 - val_loss: 0.1550 - val_acc: 0.9464
Learning rate:  0.0001
Epoch 163/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1836 - acc: 0.9318 - val_loss: 0.1438 - val_acc: 0.9467
Learning rate:  0.0001
Epoch 164/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1796 - acc: 0.9337 - val_loss: 0.1450 - val_acc: 0.9453
Learning rate:  0.0001
Epoch 165/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1775 - acc: 0.9331 - val_loss: 0.1643 - val_acc: 0.9453
Learning rate:  0.0001
Epoch 166/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1773 - acc: 0.9320 - val_loss: 0.1529 - val_acc: 0.9483
Learning rate:  0.0001
Epoch 167/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1830 - acc: 0.9302 - val_loss: 0.1515 - val_acc: 0.9478
Learning rate:  0.0001
Epoch 168/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1870 - acc: 0.9308 - val_loss: 0.1688 - val_acc: 0.9443
Learning rate:  0.0001
Epoch 169/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1827 - acc: 0.9289 - val_loss: 0.1407 - val_acc: 0.9475
Learning rate:  0.0001
Epoch 170/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1832 - acc: 0.9315 - val_loss: 0.1400 - val_acc: 0.9475
Learning rate:  0.0001
Epoch 171/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1819 - acc: 0.9324 - val_loss: 0.1430 - val_acc: 0.9459
Learning rate:  0.0001
Epoch 172/300
924/924 [==============================] - 69s 75ms/step - loss: 0.1835 - acc: 0.9318 - val_loss: 0.1411 - val_acc: 0.9462
Learning rate:  0.0001
Epoch 173/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1796 - acc: 0.9310 - val_loss: 0.1441 - val_acc: 0.9451
Learning rate:  0.0001
Epoch 174/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1818 - acc: 0.9317 - val_loss: 0.1603 - val_acc: 0.9470
Learning rate:  0.0001
Epoch 175/300
924/924 [==============================] - 69s 75ms/step - loss: 0.1800 - acc: 0.9296 - val_loss: 0.1399 - val_acc: 0.9472
Learning rate:  0.0001
Epoch 176/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1864 - acc: 0.9304 - val_loss: 0.1396 - val_acc: 0.9489
Learning rate:  0.0001
Epoch 177/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1756 - acc: 0.9343 - val_loss: 0.1476 - val_acc: 0.9478
Learning rate:  0.0001
Epoch 178/300
924/924 [==============================] - 73s 78ms/step - loss: 0.1798 - acc: 0.9311 - val_loss: 0.1410 - val_acc: 0.9467
Learning rate:  0.0001
Epoch 179/300
924/924 [==============================] - 77s 83ms/step - loss: 0.1785 - acc: 0.9310 - val_loss: 0.1401 - val_acc: 0.9475
Learning rate:  0.0001
Epoch 180/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1730 - acc: 0.9350 - val_loss: 0.1399 - val_acc: 0.9481
Learning rate:  0.0001
Epoch 181/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1775 - acc: 0.9320 - val_loss: 0.1468 - val_acc: 0.9451
Learning rate:  0.0001
Epoch 182/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1792 - acc: 0.9337 - val_loss: 0.1456 - val_acc: 0.9472
Learning rate:  0.0001
Epoch 183/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1867 - acc: 0.9321 - val_loss: 0.1409 - val_acc: 0.9462
Learning rate:  0.0001
Epoch 184/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1794 - acc: 0.9328 - val_loss: 0.1488 - val_acc: 0.9456
Learning rate:  0.0001
Epoch 185/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1825 - acc: 0.9281 - val_loss: 0.1677 - val_acc: 0.9437
Learning rate:  0.0001
Epoch 186/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1838 - acc: 0.9278 - val_loss: 0.1580 - val_acc: 0.9459
Learning rate:  0.0001
Epoch 187/300
924/924 [==============================] - 70s 76ms/step - loss: 0.1748 - acc: 0.9334 - val_loss: 0.1423 - val_acc: 0.9456
Learning rate:  0.0001
Epoch 188/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1752 - acc: 0.9324 - val_loss: 0.1465 - val_acc: 0.9453
Learning rate:  0.0001
Epoch 189/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1784 - acc: 0.9333 - val_loss: 0.1439 - val_acc: 0.9459
Learning rate:  0.0001
Epoch 190/300
924/924 [==============================] - 75s 82ms/step - loss: 0.1755 - acc: 0.9311 - val_loss: 0.1431 - val_acc: 0.9459
Learning rate:  0.0001
Epoch 191/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1791 - acc: 0.9318 - val_loss: 0.1412 - val_acc: 0.9456
Learning rate:  0.0001
Epoch 192/300
924/924 [==============================] - 72s 77ms/step - loss: 0.1754 - acc: 0.9323 - val_loss: 0.1579 - val_acc: 0.9459
Learning rate:  0.0001
Epoch 193/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1846 - acc: 0.9313 - val_loss: 0.1427 - val_acc: 0.9464
Learning rate:  0.0001
Epoch 194/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1744 - acc: 0.9350 - val_loss: 0.1540 - val_acc: 0.9437
Learning rate:  0.0001
Epoch 195/300
924/924 [==============================] - 74s 81ms/step - loss: 0.1833 - acc: 0.9305 - val_loss: 0.1424 - val_acc: 0.9445
Learning rate:  0.0001
Epoch 196/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1820 - acc: 0.9299 - val_loss: 0.1506 - val_acc: 0.9448
Learning rate:  0.0001
Epoch 197/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1823 - acc: 0.9344 - val_loss: 0.1647 - val_acc: 0.9426
Learning rate:  0.0001
Epoch 198/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1775 - acc: 0.9308 - val_loss: 0.1435 - val_acc: 0.9467
Learning rate:  0.0001
Epoch 199/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1781 - acc: 0.9322 - val_loss: 0.1578 - val_acc: 0.9453
Learning rate:  0.0001
Epoch 200/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1784 - acc: 0.9347 - val_loss: 0.1594 - val_acc: 0.9448
Learning rate:  0.0001
Epoch 201/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1717 - acc: 0.9343 - val_loss: 0.1402 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 202/300
924/924 [==============================] - 76s 82ms/step - loss: 0.1796 - acc: 0.9330 - val_loss: 0.1404 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 203/300
924/924 [==============================] - 77s 84ms/step - loss: 0.1757 - acc: 0.9327 - val_loss: 0.1395 - val_acc: 0.9470
Learning rate:  1e-05
Epoch 204/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1775 - acc: 0.9314 - val_loss: 0.1405 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 205/300
924/924 [==============================] - 70s 76ms/step - loss: 0.1787 - acc: 0.9338 - val_loss: 0.1397 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 206/300
924/924 [==============================] - 70s 76ms/step - loss: 0.1719 - acc: 0.9370 - val_loss: 0.1529 - val_acc: 0.9453
Learning rate:  1e-05
Epoch 207/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1797 - acc: 0.9335 - val_loss: 0.1402 - val_acc: 0.9456
Learning rate:  1e-05
Epoch 208/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1683 - acc: 0.9364 - val_loss: 0.1390 - val_acc: 0.9470
Learning rate:  1e-05
Epoch 209/300
924/924 [==============================] - 70s 76ms/step - loss: 0.1812 - acc: 0.9328 - val_loss: 0.1446 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 210/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1810 - acc: 0.9315 - val_loss: 0.1400 - val_acc: 0.9470
Learning rate:  1e-05
Epoch 211/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1744 - acc: 0.9341 - val_loss: 0.1406 - val_acc: 0.9472
Learning rate:  1e-05
Epoch 212/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1771 - acc: 0.9330 - val_loss: 0.1402 - val_acc: 0.9453
Learning rate:  1e-05
Epoch 213/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1752 - acc: 0.9371 - val_loss: 0.1494 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 214/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1798 - acc: 0.9309 - val_loss: 0.1412 - val_acc: 0.9459
Learning rate:  1e-05
Epoch 215/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1759 - acc: 0.9320 - val_loss: 0.1573 - val_acc: 0.9448
Learning rate:  1e-05
Epoch 216/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1773 - acc: 0.9331 - val_loss: 0.1416 - val_acc: 0.9453
Learning rate:  1e-05
Epoch 217/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1792 - acc: 0.9312 - val_loss: 0.1566 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 218/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1777 - acc: 0.9309 - val_loss: 0.1391 - val_acc: 0.9478
Learning rate:  1e-05
Epoch 219/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1712 - acc: 0.9324 - val_loss: 0.1445 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 220/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1796 - acc: 0.9318 - val_loss: 0.1436 - val_acc: 0.9472
Learning rate:  1e-05
Epoch 221/300
924/924 [==============================] - 71s 76ms/step - loss: 0.1780 - acc: 0.9339 - val_loss: 0.1410 - val_acc: 0.9478
Learning rate:  1e-05
Epoch 222/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1772 - acc: 0.9311 - val_loss: 0.1387 - val_acc: 0.9472
Learning rate:  1e-05
Epoch 223/300
924/924 [==============================] - 70s 76ms/step - loss: 0.1787 - acc: 0.9299 - val_loss: 0.1539 - val_acc: 0.9464
Learning rate:  1e-05
Epoch 224/300
924/924 [==============================] - 77s 84ms/step - loss: 0.1774 - acc: 0.9314 - val_loss: 0.1386 - val_acc: 0.9472
Learning rate:  1e-05
Epoch 225/300
924/924 [==============================] - 70s 76ms/step - loss: 0.1756 - acc: 0.9345 - val_loss: 0.1562 - val_acc: 0.9459
Learning rate:  1e-05
Epoch 226/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1782 - acc: 0.9334 - val_loss: 0.1385 - val_acc: 0.9470
Learning rate:  1e-05
Epoch 227/300
924/924 [==============================] - 72s 77ms/step - loss: 0.1770 - acc: 0.9345 - val_loss: 0.1385 - val_acc: 0.9472
Learning rate:  1e-05
Epoch 228/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1706 - acc: 0.9362 - val_loss: 0.1394 - val_acc: 0.9475
Learning rate:  1e-05
Epoch 229/300
924/924 [==============================] - 71s 76ms/step - loss: 0.1755 - acc: 0.9334 - val_loss: 0.1566 - val_acc: 0.9467
Learning rate:  1e-05
Epoch 230/300
924/924 [==============================] - 72s 77ms/step - loss: 0.1844 - acc: 0.9328 - val_loss: 0.1390 - val_acc: 0.9470
Learning rate:  1e-05
Epoch 231/300
924/924 [==============================] - 70s 75ms/step - loss: 0.1759 - acc: 0.9326 - val_loss: 0.1388 - val_acc: 0.9472
Learning rate:  1e-05
Epoch 232/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1691 - acc: 0.9368 - val_loss: 0.1435 - val_acc: 0.9464
Learning rate:  1e-05
Epoch 233/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1802 - acc: 0.9316 - val_loss: 0.1562 - val_acc: 0.9475
Learning rate:  1e-05
Epoch 234/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1768 - acc: 0.9328 - val_loss: 0.1391 - val_acc: 0.9470
Learning rate:  1e-05
Epoch 235/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1737 - acc: 0.9340 - val_loss: 0.1562 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 236/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1867 - acc: 0.9309 - val_loss: 0.1390 - val_acc: 0.9470
Learning rate:  1e-05
Epoch 237/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1789 - acc: 0.9323 - val_loss: 0.1389 - val_acc: 0.9462
Learning rate:  1e-05
Epoch 238/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1746 - acc: 0.9336 - val_loss: 0.1386 - val_acc: 0.9464
Learning rate:  1e-05
Epoch 239/300
924/924 [==============================] - 78s 84ms/step - loss: 0.1743 - acc: 0.9326 - val_loss: 0.1514 - val_acc: 0.9467
Learning rate:  1e-05
Epoch 240/300
924/924 [==============================] - 78s 84ms/step - loss: 0.1757 - acc: 0.9324 - val_loss: 0.1385 - val_acc: 0.9464
Learning rate:  1e-05
Epoch 241/300
924/924 [==============================] - 75s 82ms/step - loss: 0.1769 - acc: 0.9349 - val_loss: 0.1516 - val_acc: 0.9456
Learning rate:  1e-06
Epoch 242/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1750 - acc: 0.9346 - val_loss: 0.1517 - val_acc: 0.9456
Learning rate:  1e-06
Epoch 243/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1787 - acc: 0.9331 - val_loss: 0.1486 - val_acc: 0.9456
Learning rate:  1e-06
Epoch 244/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1749 - acc: 0.9326 - val_loss: 0.1391 - val_acc: 0.9462
Learning rate:  1e-06
Epoch 245/300
924/924 [==============================] - 72s 77ms/step - loss: 0.1788 - acc: 0.9327 - val_loss: 0.1561 - val_acc: 0.9451
Learning rate:  1e-06
Epoch 246/300
924/924 [==============================] - 71s 76ms/step - loss: 0.1755 - acc: 0.9344 - val_loss: 0.1386 - val_acc: 0.9462
Learning rate:  1e-06
Epoch 247/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1755 - acc: 0.9322 - val_loss: 0.1388 - val_acc: 0.9478
Learning rate:  1e-06
Epoch 248/300
924/924 [==============================] - 70s 76ms/step - loss: 0.1868 - acc: 0.9301 - val_loss: 0.1435 - val_acc: 0.9459
Learning rate:  1e-06
Epoch 249/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1744 - acc: 0.9338 - val_loss: 0.1380 - val_acc: 0.9467
Learning rate:  1e-06
Epoch 250/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1756 - acc: 0.9345 - val_loss: 0.1385 - val_acc: 0.9462
Learning rate:  1e-06
Epoch 251/300
924/924 [==============================] - 69s 75ms/step - loss: 0.1709 - acc: 0.9363 - val_loss: 0.1393 - val_acc: 0.9464
Learning rate:  1e-06
Epoch 252/300
924/924 [==============================] - 71s 76ms/step - loss: 0.1752 - acc: 0.9332 - val_loss: 0.1385 - val_acc: 0.9459
Learning rate:  1e-06
Epoch 253/300
924/924 [==============================] - 71s 76ms/step - loss: 0.1722 - acc: 0.9349 - val_loss: 0.1524 - val_acc: 0.9456
Learning rate:  1e-06
Epoch 254/300
924/924 [==============================] - 76s 82ms/step - loss: 0.1722 - acc: 0.9364 - val_loss: 0.1445 - val_acc: 0.9470
Learning rate:  1e-06
Epoch 255/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1757 - acc: 0.9336 - val_loss: 0.1389 - val_acc: 0.9464
Learning rate:  1e-06
Epoch 256/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1749 - acc: 0.9341 - val_loss: 0.1381 - val_acc: 0.9464
Learning rate:  1e-06
Epoch 257/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1747 - acc: 0.9357 - val_loss: 0.1516 - val_acc: 0.9445
Learning rate:  1e-06
Epoch 258/300
924/924 [==============================] - 69s 75ms/step - loss: 0.1715 - acc: 0.9326 - val_loss: 0.1385 - val_acc: 0.9467
Learning rate:  1e-06
Epoch 259/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1760 - acc: 0.9349 - val_loss: 0.1378 - val_acc: 0.9464
Learning rate:  1e-06
Epoch 260/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1711 - acc: 0.9336 - val_loss: 0.1385 - val_acc: 0.9464
Learning rate:  1e-06
Epoch 261/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1780 - acc: 0.9313 - val_loss: 0.1386 - val_acc: 0.9470
Learning rate:  1e-06
Epoch 262/300
924/924 [==============================] - 70s 76ms/step - loss: 0.1784 - acc: 0.9318 - val_loss: 0.1385 - val_acc: 0.9456
Learning rate:  1e-06
Epoch 263/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1712 - acc: 0.9350 - val_loss: 0.1471 - val_acc: 0.9462
Learning rate:  1e-06
Epoch 264/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1745 - acc: 0.9358 - val_loss: 0.1438 - val_acc: 0.9456
Learning rate:  1e-06
Epoch 265/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1838 - acc: 0.9319 - val_loss: 0.1383 - val_acc: 0.9459
Learning rate:  1e-06
Epoch 266/300
924/924 [==============================] - 69s 75ms/step - loss: 0.1790 - acc: 0.9328 - val_loss: 0.1385 - val_acc: 0.9475
Learning rate:  1e-06
Epoch 267/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1764 - acc: 0.9325 - val_loss: 0.1387 - val_acc: 0.9459
Learning rate:  1e-06
Epoch 268/300
924/924 [==============================] - 71s 76ms/step - loss: 0.1837 - acc: 0.9288 - val_loss: 0.1521 - val_acc: 0.9453
Learning rate:  1e-06
Epoch 269/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1799 - acc: 0.9334 - val_loss: 0.1473 - val_acc: 0.9448
Learning rate:  1e-06
Epoch 270/300
924/924 [==============================] - 71s 76ms/step - loss: 0.1849 - acc: 0.9322 - val_loss: 0.1447 - val_acc: 0.9456
Learning rate:  1e-06
Epoch 271/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1799 - acc: 0.9318 - val_loss: 0.1385 - val_acc: 0.9475
Learning rate:  5e-07
Epoch 272/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1821 - acc: 0.9321 - val_loss: 0.1515 - val_acc: 0.9451
Learning rate:  5e-07
Epoch 273/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1817 - acc: 0.9320 - val_loss: 0.1549 - val_acc: 0.9464
Learning rate:  5e-07
Epoch 274/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1785 - acc: 0.9337 - val_loss: 0.1598 - val_acc: 0.9448
Learning rate:  5e-07
Epoch 275/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1782 - acc: 0.9330 - val_loss: 0.1390 - val_acc: 0.9467
Learning rate:  5e-07
Epoch 276/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1766 - acc: 0.9332 - val_loss: 0.1556 - val_acc: 0.9467
Learning rate:  5e-07
Epoch 277/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1719 - acc: 0.9343 - val_loss: 0.1386 - val_acc: 0.9464
Learning rate:  5e-07
Epoch 278/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1727 - acc: 0.9338 - val_loss: 0.1392 - val_acc: 0.9456
Learning rate:  5e-07
Epoch 279/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1746 - acc: 0.9331 - val_loss: 0.1385 - val_acc: 0.9459
Learning rate:  5e-07
Epoch 280/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1704 - acc: 0.9361 - val_loss: 0.1391 - val_acc: 0.9464
Learning rate:  5e-07
Epoch 281/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1773 - acc: 0.9328 - val_loss: 0.1394 - val_acc: 0.9467
Learning rate:  5e-07
Epoch 282/300
924/924 [==============================] - 73s 78ms/step - loss: 0.1731 - acc: 0.9363 - val_loss: 0.1382 - val_acc: 0.9472
Learning rate:  5e-07
Epoch 283/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1711 - acc: 0.9347 - val_loss: 0.1383 - val_acc: 0.9464
Learning rate:  5e-07
Epoch 284/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1780 - acc: 0.9337 - val_loss: 0.1394 - val_acc: 0.9470
Learning rate:  5e-07
Epoch 285/300
924/924 [==============================] - 74s 80ms/step - loss: 0.1790 - acc: 0.9315 - val_loss: 0.1387 - val_acc: 0.9462
Learning rate:  5e-07
Epoch 286/300
924/924 [==============================] - 69s 75ms/step - loss: 0.1705 - acc: 0.9357 - val_loss: 0.1392 - val_acc: 0.9470
Learning rate:  5e-07
Epoch 287/300
924/924 [==============================] - 76s 82ms/step - loss: 0.1766 - acc: 0.9330 - val_loss: 0.1390 - val_acc: 0.9470
Learning rate:  5e-07
Epoch 288/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1807 - acc: 0.9341 - val_loss: 0.1391 - val_acc: 0.9456
Learning rate:  5e-07
Epoch 289/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1834 - acc: 0.9315 - val_loss: 0.1392 - val_acc: 0.9459
Learning rate:  5e-07
Epoch 290/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1743 - acc: 0.9339 - val_loss: 0.1380 - val_acc: 0.9462
Learning rate:  5e-07
Epoch 291/300
924/924 [==============================] - 70s 75ms/step - loss: 0.1745 - acc: 0.9332 - val_loss: 0.1388 - val_acc: 0.9467
Learning rate:  5e-07
Epoch 292/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1840 - acc: 0.9311 - val_loss: 0.1389 - val_acc: 0.9475
Learning rate:  5e-07
Epoch 293/300
924/924 [==============================] - 76s 82ms/step - loss: 0.1758 - acc: 0.9324 - val_loss: 0.1388 - val_acc: 0.9462
Learning rate:  5e-07
Epoch 294/300
924/924 [==============================] - 72s 78ms/step - loss: 0.1733 - acc: 0.9345 - val_loss: 0.1383 - val_acc: 0.9475
Learning rate:  5e-07
Epoch 295/300
924/924 [==============================] - 71s 77ms/step - loss: 0.1781 - acc: 0.9337 - val_loss: 0.1489 - val_acc: 0.9470
Learning rate:  5e-07
Epoch 296/300
924/924 [==============================] - 76s 82ms/step - loss: 0.1789 - acc: 0.9334 - val_loss: 0.1386 - val_acc: 0.9462
Learning rate:  5e-07
Epoch 297/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1804 - acc: 0.9326 - val_loss: 0.1512 - val_acc: 0.9462
Learning rate:  5e-07
Epoch 298/300
924/924 [==============================] - 75s 81ms/step - loss: 0.1768 - acc: 0.9328 - val_loss: 0.1376 - val_acc: 0.9467
Learning rate:  5e-07
Epoch 299/300
924/924 [==============================] - 73s 79ms/step - loss: 0.1731 - acc: 0.9345 - val_loss: 0.1388 - val_acc: 0.9459
Learning rate:  5e-07
Epoch 300/300
924/924 [==============================] - 82s 89ms/step - loss: 0.1772 - acc: 0.9313 - val_loss: 0.1379 - val_acc: 0.9467
Out[23]:
<tensorflow.python.keras.callbacks.History at 0x13ee0409240>
In [35]:
##### Training is finished. Restore the best checkpointed weights
# into the evaluation model and compile it so we can run the
# final evaluation on the test set. (The optimizer choice is
# irrelevant here — we only call predict/evaluate, never fit.)
modelGo.load_weights(filepath)
modelGo.compile(optimizer='adam',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
In [36]:
# Run the restored model over the held-out test dataset.
predicts = modelGo.predict(tsDat)

# Collapse the softmax probabilities and the one-hot test labels
# into plain class indices for the classification report.
predout  = np.argmax(predicts, axis=1)
testout  = np.argmax(tsLbl, axis=1)
labelname = ['non-flower', 'flower']  # class names for the classification report

testScores = metrics.accuracy_score(testout, predout)
confusion  = metrics.confusion_matrix(testout, predout)

print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout, predout,
                                    target_names=labelname, digits=4))
print(confusion)
Best accuracy (on testing dataset): 94.89%
              precision    recall  f1-score   support

  non-flower     0.9296    0.9405    0.9350      1446
      flower     0.9615    0.9542    0.9578      2250

    accuracy                         0.9489      3696
   macro avg     0.9455    0.9474    0.9464      3696
weighted avg     0.9490    0.9489    0.9489      3696

[[1360   86]
 [ 103 2147]]
In [37]:
import pandas as pd

# Plot the training history that CSVLogger recorded during fit().
records = pd.read_csv(modelname + '.csv')

plt.figure()

# Top panel: loss per epoch (validation vs. training).
plt.subplot(211)
plt.plot(records['val_loss'], label='validation')
plt.plot(records['loss'], label='training')
plt.yticks([0, 0.20, 0.30, 0.4, 0.5])
plt.title('Loss value', fontsize=12)
plt.legend()

# Hide the epoch labels on the top panel — the bottom panel shares the
# same x axis. tick_params avoids the "set_xticklabels without fixed
# ticks" warning/error raised by newer matplotlib versions.
plt.gca().tick_params(labelbottom=False)

# Bottom panel: accuracy per epoch (validation vs. training).
plt.subplot(212)
plt.plot(records['val_acc'], label='validation')
plt.plot(records['acc'], label='training')
plt.yticks([0.7, 0.8, 0.9, 1.0])
plt.title('Accuracy', fontsize=12)
plt.legend()
plt.show()
In [27]:
# Collect the indices of all test samples the model mis-classified
# (predicted class differs from the true class).
wrong_ans_index = [i for i, (p, t) in enumerate(zip(predout, testout)) if p != t]
In [28]:
# De-duplicate the index list. NOTE(review): the indices were produced by a
# single pass over range(len(predout)) so they are already unique — this
# round-trip through set() is redundant and loses the ascending order.
wrong_ans_index = list(set(wrong_ans_index))
In [29]:
# Show every test image the model got wrong, together with the
# predicted and true class indices (0 = non-flower, 1 = flower).

# Use the un-normalised copy of the test images for display.
dataset = tsDatOrg #flowers #fungus #rocks

for index in wrong_ans_index:
    print("Showing %s index image" %(index))
    print("Predicted as %s but is actually %s" %(predout[index], testout[index]))
    # BUG FIX: the original indexed an undefined/stale global `data`;
    # `dataset` (assigned above) is clearly the intended source.
    plt.imshow(dataset[index])
    plt.show()
Showing 2560 index image
Predicted as 1 but is actually 0
Showing 3585 index image
Predicted as 1 but is actually 0
Showing 2051 index image
Predicted as 0 but is actually 1
Showing 4 index image
Predicted as 0 but is actually 1
Showing 2053 index image
Predicted as 0 but is actually 1
Showing 3076 index image
Predicted as 1 but is actually 0
Showing 3079 index image
Predicted as 1 but is actually 0
Showing 3588 index image
Predicted as 1 but is actually 0
Showing 1547 index image
Predicted as 0 but is actually 1
Showing 17 index image
Predicted as 0 but is actually 1
Showing 3603 index image
Predicted as 1 but is actually 0
Showing 536 index image
Predicted as 0 but is actually 1
Showing 2584 index image
Predicted as 1 but is actually 0
Showing 2585 index image
Predicted as 1 but is actually 0
Showing 32 index image
Predicted as 0 but is actually 1
Showing 1572 index image
Predicted as 0 but is actually 1
Showing 2610 index image
Predicted as 1 but is actually 0
Showing 570 index image
Predicted as 0 but is actually 1
Showing 571 index image
Predicted as 0 but is actually 1
Showing 2111 index image
Predicted as 0 but is actually 1
Showing 3136 index image
Predicted as 1 but is actually 0
Showing 69 index image
Predicted as 0 but is actually 1
Showing 70 index image
Predicted as 0 but is actually 1
Showing 3141 index image
Predicted as 1 but is actually 0
Showing 3656 index image
Predicted as 1 but is actually 0
Showing 75 index image
Predicted as 0 but is actually 1
Showing 1616 index image
Predicted as 0 but is actually 1
Showing 83 index image
Predicted as 0 but is actually 1
Showing 1619 index image
Predicted as 0 but is actually 1
Showing 597 index image
Predicted as 0 but is actually 1
Showing 1109 index image
Predicted as 0 but is actually 1
Showing 2132 index image
Predicted as 0 but is actually 1
Showing 2648 index image
Predicted as 1 but is actually 0
Showing 89 index image
Predicted as 0 but is actually 1
Showing 601 index image
Predicted as 0 but is actually 1
Showing 3155 index image
Predicted as 1 but is actually 0
Showing 3668 index image
Predicted as 1 but is actually 0
Showing 3671 index image
Predicted as 1 but is actually 0
Showing 2656 index image
Predicted as 1 but is actually 0
Showing 3680 index image
Predicted as 1 but is actually 0
Showing 1642 index image
Predicted as 0 but is actually 1
Showing 3183 index image
Predicted as 1 but is actually 0
Showing 628 index image
Predicted as 0 but is actually 1
Showing 2164 index image
Predicted as 0 but is actually 1
Showing 631 index image
Predicted as 0 but is actually 1
Showing 119 index image
Predicted as 0 but is actually 1
Showing 636 index image
Predicted as 0 but is actually 1
Showing 2176 index image
Predicted as 0 but is actually 1
Showing 130 index image
Predicted as 0 but is actually 1
Showing 642 index image
Predicted as 0 but is actually 1
Showing 3203 index image
Predicted as 1 but is actually 0
Showing 1669 index image
Predicted as 0 but is actually 1
Showing 2181 index image
Predicted as 0 but is actually 1
Showing 647 index image
Predicted as 0 but is actually 1
Showing 137 index image
Predicted as 0 but is actually 1
Showing 1164 index image
Predicted as 0 but is actually 1
Showing 2189 index image
Predicted as 0 but is actually 1
Showing 147 index image
Predicted as 0 but is actually 1
Showing 2195 index image
Predicted as 0 but is actually 1
Showing 662 index image
Predicted as 0 but is actually 1
Showing 3224 index image
Predicted as 1 but is actually 0
Showing 2713 index image
Predicted as 1 but is actually 0
Showing 3226 index image
Predicted as 1 but is actually 0
Showing 3233 index image
Predicted as 1 but is actually 0
Showing 2210 index image
Predicted as 0 but is actually 1
Showing 677 index image
Predicted as 0 but is actually 1
Showing 1704 index image
Predicted as 0 but is actually 1
Showing 1706 index image
Predicted as 0 but is actually 1
Showing 2221 index image
Predicted as 0 but is actually 1
Showing 176 index image
Predicted as 0 but is actually 1
Showing 3254 index image
Predicted as 1 but is actually 0
Showing 2743 index image
Predicted as 1 but is actually 0
Showing 2749 index image
Predicted as 1 but is actually 0
Showing 3266 index image
Predicted as 1 but is actually 0
Showing 1220 index image
Predicted as 0 but is actually 1
Showing 3268 index image
Predicted as 1 but is actually 0
Showing 2249 index image
Predicted as 0 but is actually 1
Showing 2763 index image
Predicted as 1 but is actually 0
Showing 2770 index image
Predicted as 1 but is actually 0
Showing 2261 index image
Predicted as 1 but is actually 0
Showing 224 index image
Predicted as 0 but is actually 1
Showing 736 index image
Predicted as 0 but is actually 1
Showing 3297 index image
Predicted as 1 but is actually 0
Showing 1259 index image
Predicted as 0 but is actually 1
Showing 752 index image
Predicted as 0 but is actually 1
Showing 753 index image
Predicted as 0 but is actually 1
Showing 1264 index image
Predicted as 0 but is actually 1
Showing 246 index image
Predicted as 0 but is actually 1
Showing 2297 index image
Predicted as 1 but is actually 0
Showing 253 index image
Predicted as 0 but is actually 1
Showing 2302 index image
Predicted as 1 but is actually 0
Showing 3326 index image
Predicted as 1 but is actually 0
Showing 2309 index image
Predicted as 1 but is actually 0
Showing 2821 index image
Predicted as 1 but is actually 0
Showing 270 index image
Predicted as 0 but is actually 1
Showing 1807 index image
Predicted as 0 but is actually 1
Showing 1808 index image
Predicted as 0 but is actually 1
Showing 1298 index image
Predicted as 0 but is actually 1
Showing 3347 index image
Predicted as 1 but is actually 0
Showing 795 index image
Predicted as 0 but is actually 1
Showing 2331 index image
Predicted as 1 but is actually 0
Showing 2337 index image
Predicted as 1 but is actually 0
Showing 2343 index image
Predicted as 1 but is actually 0
Showing 3367 index image
Predicted as 1 but is actually 0
Showing 2858 index image
Predicted as 1 but is actually 0
Showing 1836 index image
Predicted as 0 but is actually 1
Showing 1325 index image
Predicted as 0 but is actually 1
Showing 2864 index image
Predicted as 1 but is actually 0
Showing 1329 index image
Predicted as 0 but is actually 1
Showing 2355 index image
Predicted as 1 but is actually 0
Showing 820 index image
Predicted as 0 but is actually 1
Showing 1845 index image
Predicted as 0 but is actually 1
Showing 312 index image
Predicted as 0 but is actually 1
Showing 2360 index image
Predicted as 1 but is actually 0
Showing 3384 index image
Predicted as 1 but is actually 0
Showing 1854 index image
Predicted as 0 but is actually 1
Showing 2375 index image
Predicted as 1 but is actually 0
Showing 1868 index image
Predicted as 0 but is actually 1
Showing 2390 index image
Predicted as 1 but is actually 0
Showing 2903 index image
Predicted as 1 but is actually 0
Showing 347 index image
Predicted as 0 but is actually 1
Showing 1371 index image
Predicted as 0 but is actually 1
Showing 2909 index image
Predicted as 1 but is actually 0
Showing 2398 index image
Predicted as 1 but is actually 0
Showing 3419 index image
Predicted as 1 but is actually 0
Showing 2400 index image
Predicted as 1 but is actually 0
Showing 2914 index image
Predicted as 1 but is actually 0
Showing 2916 index image
Predicted as 1 but is actually 0
Showing 2405 index image
Predicted as 1 but is actually 0
Showing 1382 index image
Predicted as 0 but is actually 1
Showing 2407 index image
Predicted as 1 but is actually 0
Showing 2921 index image
Predicted as 1 but is actually 0
Showing 1902 index image
Predicted as 0 but is actually 1
Showing 367 index image
Predicted as 0 but is actually 1
Showing 2929 index image
Predicted as 1 but is actually 0
Showing 1397 index image
Predicted as 0 but is actually 1
Showing 1398 index image
Predicted as 0 but is actually 1
Showing 376 index image
Predicted as 0 but is actually 1
Showing 2425 index image
Predicted as 1 but is actually 0
Showing 2426 index image
Predicted as 1 but is actually 0
Showing 2430 index image
Predicted as 1 but is actually 0
Showing 2947 index image
Predicted as 1 but is actually 0
Showing 2436 index image
Predicted as 1 but is actually 0
Showing 390 index image
Predicted as 0 but is actually 1
Showing 2441 index image
Predicted as 1 but is actually 0
Showing 2442 index image
Predicted as 1 but is actually 0
Showing 2444 index image
Predicted as 1 but is actually 0
Showing 910 index image
Predicted as 0 but is actually 1
Showing 1424 index image
Predicted as 0 but is actually 1
Showing 1426 index image
Predicted as 0 but is actually 1
Showing 3480 index image
Predicted as 1 but is actually 0
Showing 1945 index image
Predicted as 0 but is actually 1
Showing 416 index image
Predicted as 0 but is actually 1
Showing 3493 index image
Predicted as 1 but is actually 0
Showing 1963 index image
Predicted as 0 but is actually 1
Showing 1454 index image
Predicted as 0 but is actually 1
Showing 1967 index image
Predicted as 0 but is actually 1
Showing 433 index image
Predicted as 0 but is actually 1
Showing 2482 index image
Predicted as 1 but is actually 0
Showing 2997 index image
Predicted as 1 but is actually 0
Showing 951 index image
Predicted as 0 but is actually 1
Showing 440 index image
Predicted as 0 but is actually 1
Showing 1466 index image
Predicted as 0 but is actually 1
Showing 1978 index image
Predicted as 0 but is actually 1
Showing 1981 index image
Predicted as 0 but is actually 1
Showing 958 index image
Predicted as 0 but is actually 1
Showing 1470 index image
Predicted as 0 but is actually 1
Showing 3521 index image
Predicted as 1 but is actually 0
Showing 962 index image
Predicted as 0 but is actually 1
Showing 2498 index image
Predicted as 1 but is actually 0
Showing 1988 index image
Predicted as 0 but is actually 1
Showing 2505 index image
Predicted as 1 but is actually 0
Showing 3017 index image
Predicted as 1 but is actually 0
Showing 3028 index image
Predicted as 1 but is actually 0
Showing 1493 index image
Predicted as 0 but is actually 1
Showing 3542 index image
Predicted as 1 but is actually 0
Showing 3033 index image
Predicted as 1 but is actually 0
Showing 986 index image
Predicted as 0 but is actually 1
Showing 2526 index image
Predicted as 1 but is actually 0
Showing 2528 index image
Predicted as 1 but is actually 0
Showing 3044 index image
Predicted as 1 but is actually 0
Showing 2534 index image
Predicted as 1 but is actually 0
Showing 2025 index image
Predicted as 0 but is actually 1
Showing 1517 index image
Predicted as 0 but is actually 1
Showing 2029 index image
Predicted as 0 but is actually 1
Showing 3571 index image
Predicted as 1 but is actually 0
Showing 3062 index image
Predicted as 1 but is actually 0
Showing 1020 index image
Predicted as 0 but is actually 1
Showing 1022 index image
Predicted as 0 but is actually 1

Can we improve accuracy by ensembling (weighted-averaging the predictions of) the 3 trained networks?

In [38]:
def load_eval_model(factory, weights_path):
    """Build a fresh network with `factory`, restore trained weights from
    `weights_path` and compile it for evaluation (predict-only use, so the
    optimizer choice does not matter)."""
    model = factory()
    model.load_weights(weights_path)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model


# The three ensemble members, each restored from its best checkpoint.
modelGo1 = load_eval_model(create_inception_v4,
                           "FlowerPower_InceptionV4best9470.hdf5")
modelGo2 = load_eval_model(create_inception_resnet_v2,
                           "FlowerPower_InceptionResNetV2best9521.hdf5")
modelGo3 = load_eval_model(create_se_inception_resnet_v2,
                           "FlowerPower_SEInceptionResNetV2best9489.hdf5")
In [39]:
# Class-probability predictions of each ensemble member on the test set.
predicts1, predicts2, predicts3 = (
    m.predict(tsDat) for m in (modelGo1, modelGo2, modelGo3)
)
In [88]:
# Blend the three models' class probabilities with (near-)equal weights.
# Vectorised with numpy instead of the original per-sample Python loop —
# same values per element, so the downstream argmax is unchanged.
weights = [0.33, 0.33, 0.33]

predicts_ave = (weights[0] * np.asarray(predicts1)
                + weights[1] * np.asarray(predicts2)
                + weights[2] * np.asarray(predicts3))
In [89]:
predicts_ave = np.array(predicts_ave)
In [90]:
# Collapse the soft-voting probabilities and the one-hot test labels into
# class indices so scikit-learn's report functions can consume them.
predout   = np.argmax(predicts_ave, axis=1)
testout   = np.argmax(tsLbl, axis=1)
labelname = ['non-flower', 'flower']   # class names for the classification report

testScores = metrics.accuracy_score(testout, predout)
confusion  = metrics.confusion_matrix(testout, predout)

print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout, predout, target_names=labelname, digits=4))
print(confusion)
Best accuracy (on testing dataset): 95.73%
              precision    recall  f1-score   support

  non-flower     0.9322    0.9606    0.9462      1446
      flower     0.9742    0.9551    0.9645      2250

    accuracy                         0.9573      3696
   macro avg     0.9532    0.9578    0.9554      3696
weighted avg     0.9578    0.9573    0.9574      3696

[[1389   57]
 [ 101 2149]]
In [91]:
# Re-run the soft-voting ensemble with weights biased toward model 2
# (the strongest single model), then report accuracy on the test set.
weights = [0.30, 0.40, 0.30]

# Vectorised weighted average of the three models' class probabilities
# (replaces the original per-sample Python loop).
predicts_ave = (np.asarray(predicts1) * weights[0]
                + np.asarray(predicts2) * weights[1]
                + np.asarray(predicts3) * weights[2])

# Prepare the classification output for the classification report.
predout   = np.argmax(predicts_ave, axis=1)
testout   = np.argmax(tsLbl, axis=1)
labelname = ['non-flower', 'flower']   # class names for the classification report

testScores = metrics.accuracy_score(testout, predout)
confusion  = metrics.confusion_matrix(testout, predout)

print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout, predout, target_names=labelname, digits=4))
print(confusion)
Best accuracy (on testing dataset): 95.73%
              precision    recall  f1-score   support

  non-flower     0.9305    0.9627    0.9463      1446
      flower     0.9755    0.9538    0.9645      2250

    accuracy                         0.9573      3696
   macro avg     0.9530    0.9582    0.9554      3696
weighted avg     0.9579    0.9573    0.9574      3696

[[1392   54]
 [ 104 2146]]
In [79]:
# Candidate ensemble-weight triples for the grid search: the first two
# weights each sweep [0, 0.5] in steps of 0.025; the third takes the
# remainder so every triple sums to 1.
weights_all = [
    [float(round(i, 4)), float(round(j, 4)), float(round(1 - i - j, 4))]
    for i in np.linspace(0, 0.5, 21)
    for j in np.linspace(0, 0.5, 21)
]
In [87]:
# Grid-search the ensemble weights: evaluate test accuracy for every
# candidate triple and remember the best one, instead of requiring the
# reader to scan 441 printed results by eye.
best_acc     = -1.0
best_weights = None

# Ground-truth class indices are constant across candidates: compute once.
testout   = np.argmax(tsLbl, axis=1)
labelname = ['non-flower', 'flower']   # class names (kept for parity with other cells)

# Pre-convert the per-model probability arrays once, outside the loop.
p1 = np.asarray(predicts1)
p2 = np.asarray(predicts2)
p3 = np.asarray(predicts3)

for weights in weights_all:
    print(weights)

    # Vectorised soft-voting average of the three models' probabilities.
    predicts_ave = p1 * weights[0] + p2 * weights[1] + p3 * weights[2]

    predout    = np.argmax(predicts_ave, axis=1)
    testScores = metrics.accuracy_score(testout, predout)

    if testScores > best_acc:
        best_acc, best_weights = testScores, weights

    print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
    print("\n")

print("Overall best: %.2f%% with weights %s" % (best_acc * 100, best_weights))
[0.0, 0.0, 1.0]
Best accuracy (on testing dataset): 94.89%


[0.0, 0.025, 0.975]
Best accuracy (on testing dataset): 94.94%


[0.0, 0.05, 0.95]
Best accuracy (on testing dataset): 94.99%


[0.0, 0.075, 0.925]
Best accuracy (on testing dataset): 95.02%


[0.0, 0.1, 0.9]
Best accuracy (on testing dataset): 95.05%


[0.0, 0.125, 0.875]
Best accuracy (on testing dataset): 95.13%


[0.0, 0.15, 0.85]
Best accuracy (on testing dataset): 95.10%


[0.0, 0.175, 0.825]
Best accuracy (on testing dataset): 95.18%


[0.0, 0.2, 0.8]
Best accuracy (on testing dataset): 95.21%


[0.0, 0.225, 0.775]
Best accuracy (on testing dataset): 95.16%


[0.0, 0.25, 0.75]
Best accuracy (on testing dataset): 95.16%


[0.0, 0.275, 0.725]
Best accuracy (on testing dataset): 95.16%


[0.0, 0.3, 0.7]
Best accuracy (on testing dataset): 95.18%


[0.0, 0.325, 0.675]
Best accuracy (on testing dataset): 95.24%


[0.0, 0.35, 0.65]
Best accuracy (on testing dataset): 95.24%


[0.0, 0.375, 0.625]
Best accuracy (on testing dataset): 95.27%


[0.0, 0.4, 0.6]
Best accuracy (on testing dataset): 95.29%


[0.0, 0.425, 0.575]
Best accuracy (on testing dataset): 95.32%


[0.0, 0.45, 0.55]
Best accuracy (on testing dataset): 95.35%


[0.0, 0.475, 0.525]
Best accuracy (on testing dataset): 95.32%


[0.0, 0.5, 0.5]
Best accuracy (on testing dataset): 95.40%


[0.025, 0.0, 0.975]
Best accuracy (on testing dataset): 94.91%


[0.025, 0.025, 0.95]
Best accuracy (on testing dataset): 94.89%


[0.025, 0.05, 0.925]
Best accuracy (on testing dataset): 94.97%


[0.025, 0.075, 0.9]
Best accuracy (on testing dataset): 94.99%


[0.025, 0.1, 0.875]
Best accuracy (on testing dataset): 95.05%


[0.025, 0.125, 0.85]
Best accuracy (on testing dataset): 95.13%


[0.025, 0.15, 0.825]
Best accuracy (on testing dataset): 95.18%


[0.025, 0.175, 0.8]
Best accuracy (on testing dataset): 95.21%


[0.025, 0.2, 0.775]
Best accuracy (on testing dataset): 95.16%


[0.025, 0.225, 0.75]
Best accuracy (on testing dataset): 95.16%


[0.025, 0.25, 0.725]
Best accuracy (on testing dataset): 95.16%


[0.025, 0.275, 0.7]
Best accuracy (on testing dataset): 95.18%


[0.025, 0.3, 0.675]
Best accuracy (on testing dataset): 95.18%


[0.025, 0.325, 0.65]
Best accuracy (on testing dataset): 95.24%


[0.025, 0.35, 0.625]
Best accuracy (on testing dataset): 95.35%


[0.025, 0.375, 0.6]
Best accuracy (on testing dataset): 95.37%


[0.025, 0.4, 0.575]
Best accuracy (on testing dataset): 95.37%


[0.025, 0.425, 0.55]
Best accuracy (on testing dataset): 95.40%


[0.025, 0.45, 0.525]
Best accuracy (on testing dataset): 95.37%


[0.025, 0.475, 0.5]
Best accuracy (on testing dataset): 95.45%


[0.025, 0.5, 0.475]
Best accuracy (on testing dataset): 95.48%


[0.05, 0.0, 0.95]
Best accuracy (on testing dataset): 94.89%


[0.05, 0.025, 0.925]
Best accuracy (on testing dataset): 94.91%


[0.05, 0.05, 0.9]
Best accuracy (on testing dataset): 94.97%


[0.05, 0.075, 0.875]
Best accuracy (on testing dataset): 95.05%


[0.05, 0.1, 0.85]
Best accuracy (on testing dataset): 95.08%


[0.05, 0.125, 0.825]
Best accuracy (on testing dataset): 95.21%


[0.05, 0.15, 0.8]
Best accuracy (on testing dataset): 95.29%


[0.05, 0.175, 0.775]
Best accuracy (on testing dataset): 95.18%


[0.05, 0.2, 0.75]
Best accuracy (on testing dataset): 95.24%


[0.05, 0.225, 0.725]
Best accuracy (on testing dataset): 95.16%


[0.05, 0.25, 0.7]
Best accuracy (on testing dataset): 95.21%


[0.05, 0.275, 0.675]
Best accuracy (on testing dataset): 95.27%


[0.05, 0.3, 0.65]
Best accuracy (on testing dataset): 95.37%


[0.05, 0.325, 0.625]
Best accuracy (on testing dataset): 95.35%


[0.05, 0.35, 0.6]
Best accuracy (on testing dataset): 95.40%


[0.05, 0.375, 0.575]
Best accuracy (on testing dataset): 95.45%


[0.05, 0.4, 0.55]
Best accuracy (on testing dataset): 95.40%


[0.05, 0.425, 0.525]
Best accuracy (on testing dataset): 95.37%


[0.05, 0.45, 0.5]
Best accuracy (on testing dataset): 95.51%


[0.05, 0.475, 0.475]
Best accuracy (on testing dataset): 95.51%


[0.05, 0.5, 0.45]
Best accuracy (on testing dataset): 95.48%


[0.075, 0.0, 0.925]
Best accuracy (on testing dataset): 94.91%


[0.075, 0.025, 0.9]
Best accuracy (on testing dataset): 94.91%


[0.075, 0.05, 0.875]
Best accuracy (on testing dataset): 94.99%


[0.075, 0.075, 0.85]
Best accuracy (on testing dataset): 95.13%


[0.075, 0.1, 0.825]
Best accuracy (on testing dataset): 95.27%


[0.075, 0.125, 0.8]
Best accuracy (on testing dataset): 95.29%


[0.075, 0.15, 0.775]
Best accuracy (on testing dataset): 95.29%


[0.075, 0.175, 0.75]
Best accuracy (on testing dataset): 95.27%


[0.075, 0.2, 0.725]
Best accuracy (on testing dataset): 95.24%


[0.075, 0.225, 0.7]
Best accuracy (on testing dataset): 95.18%


[0.075, 0.25, 0.675]
Best accuracy (on testing dataset): 95.29%


[0.075, 0.275, 0.65]
Best accuracy (on testing dataset): 95.35%


[0.075, 0.3, 0.625]
Best accuracy (on testing dataset): 95.43%


[0.075, 0.325, 0.6]
Best accuracy (on testing dataset): 95.40%


[0.075, 0.35, 0.575]
Best accuracy (on testing dataset): 95.40%


[0.075, 0.375, 0.55]
Best accuracy (on testing dataset): 95.45%


[0.075, 0.4, 0.525]
Best accuracy (on testing dataset): 95.43%


[0.075, 0.425, 0.5]
Best accuracy (on testing dataset): 95.48%


[0.075, 0.45, 0.475]
Best accuracy (on testing dataset): 95.51%


[0.075, 0.475, 0.45]
Best accuracy (on testing dataset): 95.56%


[0.075, 0.5, 0.425]
Best accuracy (on testing dataset): 95.51%


[0.1, 0.0, 0.9]
Best accuracy (on testing dataset): 94.91%


[0.1, 0.025, 0.875]
Best accuracy (on testing dataset): 94.94%


[0.1, 0.05, 0.85]
Best accuracy (on testing dataset): 95.10%


[0.1, 0.075, 0.825]
Best accuracy (on testing dataset): 95.21%


[0.1, 0.1, 0.8]
Best accuracy (on testing dataset): 95.27%


[0.1, 0.125, 0.775]
Best accuracy (on testing dataset): 95.32%


[0.1, 0.15, 0.75]
Best accuracy (on testing dataset): 95.29%


[0.1, 0.175, 0.725]
Best accuracy (on testing dataset): 95.35%


[0.1, 0.2, 0.7]
Best accuracy (on testing dataset): 95.32%


[0.1, 0.225, 0.675]
Best accuracy (on testing dataset): 95.32%


[0.1, 0.25, 0.65]
Best accuracy (on testing dataset): 95.32%


[0.1, 0.275, 0.625]
Best accuracy (on testing dataset): 95.40%


[0.1, 0.3, 0.6]
Best accuracy (on testing dataset): 95.43%


[0.1, 0.325, 0.575]
Best accuracy (on testing dataset): 95.48%


[0.1, 0.35, 0.55]
Best accuracy (on testing dataset): 95.40%


[0.1, 0.375, 0.525]
Best accuracy (on testing dataset): 95.45%


[0.1, 0.4, 0.5]
Best accuracy (on testing dataset): 95.48%


[0.1, 0.425, 0.475]
Best accuracy (on testing dataset): 95.54%


[0.1, 0.45, 0.45]
Best accuracy (on testing dataset): 95.56%


[0.1, 0.475, 0.425]
Best accuracy (on testing dataset): 95.54%


[0.1, 0.5, 0.4]
Best accuracy (on testing dataset): 95.45%


[0.125, 0.0, 0.875]
Best accuracy (on testing dataset): 94.94%


[0.125, 0.025, 0.85]
Best accuracy (on testing dataset): 95.05%


[0.125, 0.05, 0.825]
Best accuracy (on testing dataset): 95.13%


[0.125, 0.075, 0.8]
Best accuracy (on testing dataset): 95.27%


[0.125, 0.1, 0.775]
Best accuracy (on testing dataset): 95.32%


[0.125, 0.125, 0.75]
Best accuracy (on testing dataset): 95.40%


[0.125, 0.15, 0.725]
Best accuracy (on testing dataset): 95.43%


[0.125, 0.175, 0.7]
Best accuracy (on testing dataset): 95.40%


[0.125, 0.2, 0.675]
Best accuracy (on testing dataset): 95.37%


[0.125, 0.225, 0.65]
Best accuracy (on testing dataset): 95.40%


[0.125, 0.25, 0.625]
Best accuracy (on testing dataset): 95.32%


[0.125, 0.275, 0.6]
Best accuracy (on testing dataset): 95.51%


[0.125, 0.3, 0.575]
Best accuracy (on testing dataset): 95.54%


[0.125, 0.325, 0.55]
Best accuracy (on testing dataset): 95.54%


[0.125, 0.35, 0.525]
Best accuracy (on testing dataset): 95.51%


[0.125, 0.375, 0.5]
Best accuracy (on testing dataset): 95.56%


[0.125, 0.4, 0.475]
Best accuracy (on testing dataset): 95.59%


[0.125, 0.425, 0.45]
Best accuracy (on testing dataset): 95.51%


[0.125, 0.45, 0.425]
Best accuracy (on testing dataset): 95.56%


[0.125, 0.475, 0.4]
Best accuracy (on testing dataset): 95.56%


[0.125, 0.5, 0.375]
Best accuracy (on testing dataset): 95.62%


[0.15, 0.0, 0.85]
Best accuracy (on testing dataset): 94.97%


[0.15, 0.025, 0.825]
Best accuracy (on testing dataset): 95.10%


[0.15, 0.05, 0.8]
Best accuracy (on testing dataset): 95.18%


[0.15, 0.075, 0.775]
Best accuracy (on testing dataset): 95.32%


[0.15, 0.1, 0.75]
Best accuracy (on testing dataset): 95.37%


[0.15, 0.125, 0.725]
Best accuracy (on testing dataset): 95.37%


[0.15, 0.15, 0.7]
Best accuracy (on testing dataset): 95.43%


[0.15, 0.175, 0.675]
Best accuracy (on testing dataset): 95.40%


[0.15, 0.2, 0.65]
Best accuracy (on testing dataset): 95.43%


[0.15, 0.225, 0.625]
Best accuracy (on testing dataset): 95.43%


[0.15, 0.25, 0.6]
Best accuracy (on testing dataset): 95.48%


[0.15, 0.275, 0.575]
Best accuracy (on testing dataset): 95.54%


[0.15, 0.3, 0.55]
Best accuracy (on testing dataset): 95.54%


[0.15, 0.325, 0.525]
Best accuracy (on testing dataset): 95.70%


[0.15, 0.35, 0.5]
Best accuracy (on testing dataset): 95.62%


[0.15, 0.375, 0.475]
Best accuracy (on testing dataset): 95.56%


[0.15, 0.4, 0.45]
Best accuracy (on testing dataset): 95.56%


[0.15, 0.425, 0.425]
Best accuracy (on testing dataset): 95.62%


[0.15, 0.45, 0.4]
Best accuracy (on testing dataset): 95.67%


[0.15, 0.475, 0.375]
Best accuracy (on testing dataset): 95.64%


[0.15, 0.5, 0.35]
Best accuracy (on testing dataset): 95.62%


[0.175, 0.0, 0.825]
Best accuracy (on testing dataset): 95.08%


[0.175, 0.025, 0.8]
Best accuracy (on testing dataset): 95.18%


[0.175, 0.05, 0.775]
Best accuracy (on testing dataset): 95.29%


[0.175, 0.075, 0.75]
Best accuracy (on testing dataset): 95.35%


[0.175, 0.1, 0.725]
Best accuracy (on testing dataset): 95.32%


[0.175, 0.125, 0.7]
Best accuracy (on testing dataset): 95.40%


[0.175, 0.15, 0.675]
Best accuracy (on testing dataset): 95.45%


[0.175, 0.175, 0.65]
Best accuracy (on testing dataset): 95.48%


[0.175, 0.2, 0.625]
Best accuracy (on testing dataset): 95.51%


[0.175, 0.225, 0.6]
Best accuracy (on testing dataset): 95.48%


[0.175, 0.25, 0.575]
Best accuracy (on testing dataset): 95.59%


[0.175, 0.275, 0.55]
Best accuracy (on testing dataset): 95.59%


[0.175, 0.3, 0.525]
Best accuracy (on testing dataset): 95.67%


[0.175, 0.325, 0.5]
Best accuracy (on testing dataset): 95.67%


[0.175, 0.35, 0.475]
Best accuracy (on testing dataset): 95.64%


[0.175, 0.375, 0.45]
Best accuracy (on testing dataset): 95.56%


[0.175, 0.4, 0.425]
Best accuracy (on testing dataset): 95.62%


[0.175, 0.425, 0.4]
Best accuracy (on testing dataset): 95.67%


[0.175, 0.45, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.175, 0.475, 0.35]
Best accuracy (on testing dataset): 95.62%


[0.175, 0.5, 0.325]
Best accuracy (on testing dataset): 95.62%


[0.2, 0.0, 0.8]
Best accuracy (on testing dataset): 95.21%


[0.2, 0.025, 0.775]
Best accuracy (on testing dataset): 95.27%


[0.2, 0.05, 0.75]
Best accuracy (on testing dataset): 95.29%


[0.2, 0.075, 0.725]
Best accuracy (on testing dataset): 95.37%


[0.2, 0.1, 0.7]
Best accuracy (on testing dataset): 95.37%


[0.2, 0.125, 0.675]
Best accuracy (on testing dataset): 95.43%


[0.2, 0.15, 0.65]
Best accuracy (on testing dataset): 95.51%


[0.2, 0.175, 0.625]
Best accuracy (on testing dataset): 95.56%


[0.2, 0.2, 0.6]
Best accuracy (on testing dataset): 95.56%


[0.2, 0.225, 0.575]
Best accuracy (on testing dataset): 95.62%


[0.2, 0.25, 0.55]
Best accuracy (on testing dataset): 95.62%


[0.2, 0.275, 0.525]
Best accuracy (on testing dataset): 95.70%


[0.2, 0.3, 0.5]
Best accuracy (on testing dataset): 95.70%


[0.2, 0.325, 0.475]
Best accuracy (on testing dataset): 95.67%


[0.2, 0.35, 0.45]
Best accuracy (on testing dataset): 95.67%


[0.2, 0.375, 0.425]
Best accuracy (on testing dataset): 95.64%


[0.2, 0.4, 0.4]
Best accuracy (on testing dataset): 95.64%


[0.2, 0.425, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.2, 0.45, 0.35]
Best accuracy (on testing dataset): 95.64%


[0.2, 0.475, 0.325]
Best accuracy (on testing dataset): 95.64%


[0.2, 0.5, 0.3]
Best accuracy (on testing dataset): 95.64%


[0.225, 0.0, 0.775]
Best accuracy (on testing dataset): 95.27%


[0.225, 0.025, 0.75]
Best accuracy (on testing dataset): 95.29%


[0.225, 0.05, 0.725]
Best accuracy (on testing dataset): 95.35%


[0.225, 0.075, 0.7]
Best accuracy (on testing dataset): 95.40%


[0.225, 0.1, 0.675]
Best accuracy (on testing dataset): 95.51%


[0.225, 0.125, 0.65]
Best accuracy (on testing dataset): 95.48%


[0.225, 0.15, 0.625]
Best accuracy (on testing dataset): 95.54%


[0.225, 0.175, 0.6]
Best accuracy (on testing dataset): 95.59%


[0.225, 0.2, 0.575]
Best accuracy (on testing dataset): 95.64%


[0.225, 0.225, 0.55]
Best accuracy (on testing dataset): 95.62%


[0.225, 0.25, 0.525]
Best accuracy (on testing dataset): 95.67%


[0.225, 0.275, 0.5]
Best accuracy (on testing dataset): 95.70%


[0.225, 0.3, 0.475]
Best accuracy (on testing dataset): 95.70%


[0.225, 0.325, 0.45]
Best accuracy (on testing dataset): 95.70%


[0.225, 0.35, 0.425]
Best accuracy (on testing dataset): 95.70%


[0.225, 0.375, 0.4]
Best accuracy (on testing dataset): 95.70%


[0.225, 0.4, 0.375]
Best accuracy (on testing dataset): 95.62%


[0.225, 0.425, 0.35]
Best accuracy (on testing dataset): 95.64%


[0.225, 0.45, 0.325]
Best accuracy (on testing dataset): 95.67%


[0.225, 0.475, 0.3]
Best accuracy (on testing dataset): 95.64%


[0.225, 0.5, 0.275]
Best accuracy (on testing dataset): 95.64%


[0.25, 0.0, 0.75]
Best accuracy (on testing dataset): 95.29%


[0.25, 0.025, 0.725]
Best accuracy (on testing dataset): 95.37%


[0.25, 0.05, 0.7]
Best accuracy (on testing dataset): 95.48%


[0.25, 0.075, 0.675]
Best accuracy (on testing dataset): 95.48%


[0.25, 0.1, 0.65]
Best accuracy (on testing dataset): 95.54%


[0.25, 0.125, 0.625]
Best accuracy (on testing dataset): 95.54%


[0.25, 0.15, 0.6]
Best accuracy (on testing dataset): 95.56%


[0.25, 0.175, 0.575]
Best accuracy (on testing dataset): 95.62%


[0.25, 0.2, 0.55]
Best accuracy (on testing dataset): 95.62%


[0.25, 0.225, 0.525]
Best accuracy (on testing dataset): 95.70%


[0.25, 0.25, 0.5]
Best accuracy (on testing dataset): 95.67%


[0.25, 0.275, 0.475]
Best accuracy (on testing dataset): 95.73%


[0.25, 0.3, 0.45]
Best accuracy (on testing dataset): 95.73%


[0.25, 0.325, 0.425]
Best accuracy (on testing dataset): 95.75%


[0.25, 0.35, 0.4]
Best accuracy (on testing dataset): 95.67%


[0.25, 0.375, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.25, 0.4, 0.35]
Best accuracy (on testing dataset): 95.67%


[0.25, 0.425, 0.325]
Best accuracy (on testing dataset): 95.67%


[0.25, 0.45, 0.3]
Best accuracy (on testing dataset): 95.62%


[0.25, 0.475, 0.275]
Best accuracy (on testing dataset): 95.62%


[0.25, 0.5, 0.25]
Best accuracy (on testing dataset): 95.64%


[0.275, 0.0, 0.725]
Best accuracy (on testing dataset): 95.37%


[0.275, 0.025, 0.7]
Best accuracy (on testing dataset): 95.59%


[0.275, 0.05, 0.675]
Best accuracy (on testing dataset): 95.59%


[0.275, 0.075, 0.65]
Best accuracy (on testing dataset): 95.56%


[0.275, 0.1, 0.625]
Best accuracy (on testing dataset): 95.56%


[0.275, 0.125, 0.6]
Best accuracy (on testing dataset): 95.54%


[0.275, 0.15, 0.575]
Best accuracy (on testing dataset): 95.59%


[0.275, 0.175, 0.55]
Best accuracy (on testing dataset): 95.62%


[0.275, 0.2, 0.525]
Best accuracy (on testing dataset): 95.62%


[0.275, 0.225, 0.5]
Best accuracy (on testing dataset): 95.73%


[0.275, 0.25, 0.475]
Best accuracy (on testing dataset): 95.73%


[0.275, 0.275, 0.45]
Best accuracy (on testing dataset): 95.73%


[0.275, 0.3, 0.425]
Best accuracy (on testing dataset): 95.73%


[0.275, 0.325, 0.4]
Best accuracy (on testing dataset): 95.73%


[0.275, 0.35, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.275, 0.375, 0.35]
Best accuracy (on testing dataset): 95.75%


[0.275, 0.4, 0.325]
Best accuracy (on testing dataset): 95.73%


[0.275, 0.425, 0.3]
Best accuracy (on testing dataset): 95.64%


[0.275, 0.45, 0.275]
Best accuracy (on testing dataset): 95.70%


[0.275, 0.475, 0.25]
Best accuracy (on testing dataset): 95.70%


[0.275, 0.5, 0.225]
Best accuracy (on testing dataset): 95.67%


[0.3, 0.0, 0.7]
Best accuracy (on testing dataset): 95.51%


[0.3, 0.025, 0.675]
Best accuracy (on testing dataset): 95.54%


[0.3, 0.05, 0.65]
Best accuracy (on testing dataset): 95.56%


[0.3, 0.075, 0.625]
Best accuracy (on testing dataset): 95.62%


[0.3, 0.1, 0.6]
Best accuracy (on testing dataset): 95.54%


[0.3, 0.125, 0.575]
Best accuracy (on testing dataset): 95.59%


[0.3, 0.15, 0.55]
Best accuracy (on testing dataset): 95.64%


[0.3, 0.175, 0.525]
Best accuracy (on testing dataset): 95.67%


[0.3, 0.2, 0.5]
Best accuracy (on testing dataset): 95.73%


[0.3, 0.225, 0.475]
Best accuracy (on testing dataset): 95.73%


[0.3, 0.25, 0.45]
Best accuracy (on testing dataset): 95.70%


[0.3, 0.275, 0.425]
Best accuracy (on testing dataset): 95.73%


[0.3, 0.3, 0.4]
Best accuracy (on testing dataset): 95.67%


[0.3, 0.325, 0.375]
Best accuracy (on testing dataset): 95.70%


[0.3, 0.35, 0.35]
Best accuracy (on testing dataset): 95.75%


[0.3, 0.375, 0.325]
Best accuracy (on testing dataset): 95.70%


[0.3, 0.4, 0.3]
Best accuracy (on testing dataset): 95.73%


[0.3, 0.425, 0.275]
Best accuracy (on testing dataset): 95.75%


[0.3, 0.45, 0.25]
Best accuracy (on testing dataset): 95.75%


[0.3, 0.475, 0.225]
Best accuracy (on testing dataset): 95.67%


[0.3, 0.5, 0.2]
Best accuracy (on testing dataset): 95.67%


[0.325, 0.0, 0.675]
Best accuracy (on testing dataset): 95.37%


[0.325, 0.025, 0.65]
Best accuracy (on testing dataset): 95.51%


[0.325, 0.05, 0.625]
Best accuracy (on testing dataset): 95.56%


[0.325, 0.075, 0.6]
Best accuracy (on testing dataset): 95.56%


[0.325, 0.1, 0.575]
Best accuracy (on testing dataset): 95.54%


[0.325, 0.125, 0.55]
Best accuracy (on testing dataset): 95.64%


[0.325, 0.15, 0.525]
Best accuracy (on testing dataset): 95.70%


[0.325, 0.175, 0.5]
Best accuracy (on testing dataset): 95.81%


[0.325, 0.2, 0.475]
Best accuracy (on testing dataset): 95.75%


[0.325, 0.225, 0.45]
Best accuracy (on testing dataset): 95.73%


[0.325, 0.25, 0.425]
Best accuracy (on testing dataset): 95.67%


[0.325, 0.275, 0.4]
Best accuracy (on testing dataset): 95.67%


[0.325, 0.3, 0.375]
Best accuracy (on testing dataset): 95.73%


[0.325, 0.325, 0.35]
Best accuracy (on testing dataset): 95.73%


[0.325, 0.35, 0.325]
Best accuracy (on testing dataset): 95.81%


[0.325, 0.375, 0.3]
Best accuracy (on testing dataset): 95.78%


[0.325, 0.4, 0.275]
Best accuracy (on testing dataset): 95.81%


[0.325, 0.425, 0.25]
Best accuracy (on testing dataset): 95.83%


[0.325, 0.45, 0.225]
Best accuracy (on testing dataset): 95.73%


[0.325, 0.475, 0.2]
Best accuracy (on testing dataset): 95.64%


[0.325, 0.5, 0.175]
Best accuracy (on testing dataset): 95.64%


[0.35, 0.0, 0.65]
Best accuracy (on testing dataset): 95.37%


[0.35, 0.025, 0.625]
Best accuracy (on testing dataset): 95.37%


[0.35, 0.05, 0.6]
Best accuracy (on testing dataset): 95.51%


[0.35, 0.075, 0.575]
Best accuracy (on testing dataset): 95.56%


[0.35, 0.1, 0.55]
Best accuracy (on testing dataset): 95.56%


[0.35, 0.125, 0.525]
Best accuracy (on testing dataset): 95.64%


[0.35, 0.15, 0.5]
Best accuracy (on testing dataset): 95.70%


[0.35, 0.175, 0.475]
Best accuracy (on testing dataset): 95.75%


[0.35, 0.2, 0.45]
Best accuracy (on testing dataset): 95.73%


[0.35, 0.225, 0.425]
Best accuracy (on testing dataset): 95.67%


[0.35, 0.25, 0.4]
Best accuracy (on testing dataset): 95.64%


[0.35, 0.275, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.35, 0.3, 0.35]
Best accuracy (on testing dataset): 95.70%


[0.35, 0.325, 0.325]
Best accuracy (on testing dataset): 95.75%


[0.35, 0.35, 0.3]
Best accuracy (on testing dataset): 95.83%


[0.35, 0.375, 0.275]
Best accuracy (on testing dataset): 95.86%


[0.35, 0.4, 0.25]
Best accuracy (on testing dataset): 95.81%


[0.35, 0.425, 0.225]
Best accuracy (on testing dataset): 95.75%


[0.35, 0.45, 0.2]
Best accuracy (on testing dataset): 95.70%


[0.35, 0.475, 0.175]
Best accuracy (on testing dataset): 95.62%


[0.35, 0.5, 0.15]
Best accuracy (on testing dataset): 95.56%


[0.375, 0.0, 0.625]
Best accuracy (on testing dataset): 95.35%


[0.375, 0.025, 0.6]
Best accuracy (on testing dataset): 95.43%


[0.375, 0.05, 0.575]
Best accuracy (on testing dataset): 95.48%


[0.375, 0.075, 0.55]
Best accuracy (on testing dataset): 95.54%


[0.375, 0.1, 0.525]
Best accuracy (on testing dataset): 95.62%


[0.375, 0.125, 0.5]
Best accuracy (on testing dataset): 95.73%


[0.375, 0.15, 0.475]
Best accuracy (on testing dataset): 95.75%


[0.375, 0.175, 0.45]
Best accuracy (on testing dataset): 95.75%


[0.375, 0.2, 0.425]
Best accuracy (on testing dataset): 95.73%


[0.375, 0.225, 0.4]
Best accuracy (on testing dataset): 95.64%


[0.375, 0.25, 0.375]
Best accuracy (on testing dataset): 95.62%


[0.375, 0.275, 0.35]
Best accuracy (on testing dataset): 95.64%


[0.375, 0.3, 0.325]
Best accuracy (on testing dataset): 95.70%


[0.375, 0.325, 0.3]
Best accuracy (on testing dataset): 95.81%


[0.375, 0.35, 0.275]
Best accuracy (on testing dataset): 95.81%


[0.375, 0.375, 0.25]
Best accuracy (on testing dataset): 95.83%


[0.375, 0.4, 0.225]
Best accuracy (on testing dataset): 95.78%


[0.375, 0.425, 0.2]
Best accuracy (on testing dataset): 95.75%


[0.375, 0.45, 0.175]
Best accuracy (on testing dataset): 95.67%


[0.375, 0.475, 0.15]
Best accuracy (on testing dataset): 95.56%


[0.375, 0.5, 0.125]
Best accuracy (on testing dataset): 95.54%


[0.4, 0.0, 0.6]
Best accuracy (on testing dataset): 95.43%


[0.4, 0.025, 0.575]
Best accuracy (on testing dataset): 95.54%


[0.4, 0.05, 0.55]
Best accuracy (on testing dataset): 95.51%


[0.4, 0.075, 0.525]
Best accuracy (on testing dataset): 95.56%


[0.4, 0.1, 0.5]
Best accuracy (on testing dataset): 95.67%


[0.4, 0.125, 0.475]
Best accuracy (on testing dataset): 95.70%


[0.4, 0.15, 0.45]
Best accuracy (on testing dataset): 95.78%


[0.4, 0.175, 0.425]
Best accuracy (on testing dataset): 95.81%


[0.4, 0.2, 0.4]
Best accuracy (on testing dataset): 95.67%


[0.4, 0.225, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.4, 0.25, 0.35]
Best accuracy (on testing dataset): 95.64%


[0.4, 0.275, 0.325]
Best accuracy (on testing dataset): 95.62%


[0.4, 0.3, 0.3]
Best accuracy (on testing dataset): 95.75%


[0.4, 0.325, 0.275]
Best accuracy (on testing dataset): 95.75%


[0.4, 0.35, 0.25]
Best accuracy (on testing dataset): 95.73%


[0.4, 0.375, 0.225]
Best accuracy (on testing dataset): 95.73%


[0.4, 0.4, 0.2]
Best accuracy (on testing dataset): 95.70%


[0.4, 0.425, 0.175]
Best accuracy (on testing dataset): 95.64%


[0.4, 0.45, 0.15]
Best accuracy (on testing dataset): 95.64%


[0.4, 0.475, 0.125]
Best accuracy (on testing dataset): 95.56%


[0.4, 0.5, 0.1]
Best accuracy (on testing dataset): 95.62%


[0.425, 0.0, 0.575]
Best accuracy (on testing dataset): 95.48%


[0.425, 0.025, 0.55]
Best accuracy (on testing dataset): 95.51%


[0.425, 0.05, 0.525]
Best accuracy (on testing dataset): 95.59%


[0.425, 0.075, 0.5]
Best accuracy (on testing dataset): 95.70%


[0.425, 0.1, 0.475]
Best accuracy (on testing dataset): 95.73%


[0.425, 0.125, 0.45]
Best accuracy (on testing dataset): 95.70%


[0.425, 0.15, 0.425]
Best accuracy (on testing dataset): 95.83%


[0.425, 0.175, 0.4]
Best accuracy (on testing dataset): 95.75%


[0.425, 0.2, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.425, 0.225, 0.35]
Best accuracy (on testing dataset): 95.59%


[0.425, 0.25, 0.325]
Best accuracy (on testing dataset): 95.67%


[0.425, 0.275, 0.3]
Best accuracy (on testing dataset): 95.75%


[0.425, 0.3, 0.275]
Best accuracy (on testing dataset): 95.75%


[0.425, 0.325, 0.25]
Best accuracy (on testing dataset): 95.70%


[0.425, 0.35, 0.225]
Best accuracy (on testing dataset): 95.67%


[0.425, 0.375, 0.2]
Best accuracy (on testing dataset): 95.67%


[0.425, 0.4, 0.175]
Best accuracy (on testing dataset): 95.64%


[0.425, 0.425, 0.15]
Best accuracy (on testing dataset): 95.62%


[0.425, 0.45, 0.125]
Best accuracy (on testing dataset): 95.62%


[0.425, 0.475, 0.1]
Best accuracy (on testing dataset): 95.59%


[0.425, 0.5, 0.075]
Best accuracy (on testing dataset): 95.51%


[0.45, 0.0, 0.55]
Best accuracy (on testing dataset): 95.51%


[0.45, 0.025, 0.525]
Best accuracy (on testing dataset): 95.51%


[0.45, 0.05, 0.5]
Best accuracy (on testing dataset): 95.67%


[0.45, 0.075, 0.475]
Best accuracy (on testing dataset): 95.67%


[0.45, 0.1, 0.45]
Best accuracy (on testing dataset): 95.73%


[0.45, 0.125, 0.425]
Best accuracy (on testing dataset): 95.67%


[0.45, 0.15, 0.4]
Best accuracy (on testing dataset): 95.73%


[0.45, 0.175, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.45, 0.2, 0.35]
Best accuracy (on testing dataset): 95.67%


[0.45, 0.225, 0.325]
Best accuracy (on testing dataset): 95.67%


[0.45, 0.25, 0.3]
Best accuracy (on testing dataset): 95.73%


[0.45, 0.275, 0.275]
Best accuracy (on testing dataset): 95.73%


[0.45, 0.3, 0.25]
Best accuracy (on testing dataset): 95.70%


[0.45, 0.325, 0.225]
Best accuracy (on testing dataset): 95.64%


[0.45, 0.35, 0.2]
Best accuracy (on testing dataset): 95.64%


[0.45, 0.375, 0.175]
Best accuracy (on testing dataset): 95.64%


[0.45, 0.4, 0.15]
Best accuracy (on testing dataset): 95.64%


[0.45, 0.425, 0.125]
Best accuracy (on testing dataset): 95.67%


[0.45, 0.45, 0.1]
Best accuracy (on testing dataset): 95.59%


[0.45, 0.475, 0.075]
Best accuracy (on testing dataset): 95.54%


[0.45, 0.5, 0.05]
Best accuracy (on testing dataset): 95.51%


[0.475, 0.0, 0.525]
Best accuracy (on testing dataset): 95.43%


[0.475, 0.025, 0.5]
Best accuracy (on testing dataset): 95.62%


[0.475, 0.05, 0.475]
Best accuracy (on testing dataset): 95.73%


[0.475, 0.075, 0.45]
Best accuracy (on testing dataset): 95.73%


[0.475, 0.1, 0.425]
Best accuracy (on testing dataset): 95.73%


[0.475, 0.125, 0.4]
Best accuracy (on testing dataset): 95.70%


[0.475, 0.15, 0.375]
Best accuracy (on testing dataset): 95.67%


[0.475, 0.175, 0.35]
Best accuracy (on testing dataset): 95.64%


[0.475, 0.2, 0.325]
Best accuracy (on testing dataset): 95.73%


[0.475, 0.225, 0.3]
Best accuracy (on testing dataset): 95.75%


[0.475, 0.25, 0.275]
Best accuracy (on testing dataset): 95.78%


[0.475, 0.275, 0.25]
Best accuracy (on testing dataset): 95.70%


[0.475, 0.3, 0.225]
Best accuracy (on testing dataset): 95.67%


[0.475, 0.325, 0.2]
Best accuracy (on testing dataset): 95.64%


[0.475, 0.35, 0.175]
Best accuracy (on testing dataset): 95.64%


[0.475, 0.375, 0.15]
Best accuracy (on testing dataset): 95.64%


[0.475, 0.4, 0.125]
Best accuracy (on testing dataset): 95.73%


[0.475, 0.425, 0.1]
Best accuracy (on testing dataset): 95.67%


[0.475, 0.45, 0.075]
Best accuracy (on testing dataset): 95.59%


[0.475, 0.475, 0.05]
Best accuracy (on testing dataset): 95.59%


[0.475, 0.5, 0.025]
Best accuracy (on testing dataset): 95.56%


[0.5, 0.0, 0.5]
Best accuracy (on testing dataset): 95.45%


[0.5, 0.025, 0.475]
Best accuracy (on testing dataset): 95.62%


[0.5, 0.05, 0.45]
Best accuracy (on testing dataset): 95.62%


[0.5, 0.075, 0.425]
Best accuracy (on testing dataset): 95.62%


[0.5, 0.1, 0.4]
Best accuracy (on testing dataset): 95.64%


[0.5, 0.125, 0.375]
Best accuracy (on testing dataset): 95.62%


[0.5, 0.15, 0.35]
Best accuracy (on testing dataset): 95.62%


[0.5, 0.175, 0.325]
Best accuracy (on testing dataset): 95.67%


[0.5, 0.2, 0.3]
Best accuracy (on testing dataset): 95.64%


[0.5, 0.225, 0.275]
Best accuracy (on testing dataset): 95.64%


[0.5, 0.25, 0.25]
Best accuracy (on testing dataset): 95.70%


[0.5, 0.275, 0.225]
Best accuracy (on testing dataset): 95.67%


[0.5, 0.3, 0.2]
Best accuracy (on testing dataset): 95.64%


[0.5, 0.325, 0.175]
Best accuracy (on testing dataset): 95.62%


[0.5, 0.35, 0.15]
Best accuracy (on testing dataset): 95.62%


[0.5, 0.375, 0.125]
Best accuracy (on testing dataset): 95.64%


[0.5, 0.4, 0.1]
Best accuracy (on testing dataset): 95.67%


[0.5, 0.425, 0.075]
Best accuracy (on testing dataset): 95.67%


[0.5, 0.45, 0.05]
Best accuracy (on testing dataset): 95.70%


[0.5, 0.475, 0.025]
Best accuracy (on testing dataset): 95.67%


[0.5, 0.5, 0.0]
Best accuracy (on testing dataset): 95.62%


In [ ]:
# Best accuracy is obtained with the following weights:

# [0.35, 0.375, 0.275]
# Best accuracy (on testing dataset): 95.86%
In [92]:
# Final evaluation using the best weight triple found by the grid search.
weights = [0.35, 0.375, 0.275]

# Vectorised weighted average (soft voting) of the three models'
# class probabilities (replaces the original per-sample Python loop).
predicts_ave = (np.asarray(predicts1) * weights[0]
                + np.asarray(predicts2) * weights[1]
                + np.asarray(predicts3) * weights[2])

# Prepare the classification output for the classification report.
predout   = np.argmax(predicts_ave, axis=1)
testout   = np.argmax(tsLbl, axis=1)
labelname = ['non-flower', 'flower']   # class names for the classification report

testScores = metrics.accuracy_score(testout, predout)
confusion  = metrics.confusion_matrix(testout, predout)

print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout, predout, target_names=labelname, digits=4))
print(confusion)
Best accuracy (on testing dataset): 95.86%
              precision    recall  f1-score   support

  non-flower     0.9336    0.9627    0.9479      1446
      flower     0.9755    0.9560    0.9657      2250

    accuracy                         0.9586      3696
   macro avg     0.9546    0.9593    0.9568      3696
weighted avg     0.9591    0.9586    0.9587      3696

[[1392   54]
 [  99 2151]]
In [ ]: